Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.debug | 18
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 192
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 44
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_hdmi.c | 66
-rw-r--r--  drivers/gpu/drm/i915/display/hsw_ips.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/i9xx_wm.c | 12
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 83
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 40
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 160
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.h | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 70
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crtc_state_dump.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cursor.c | 42
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.c | 251
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cx0_phy.h | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 230
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 589
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_core.h | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 237
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs_params.c | 176
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs_params.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_device.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 19
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.c | 217
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_params.h | 61
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 23
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_reset.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 45
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc.c | 106
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dmc_regs.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 514
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 28
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 99
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_regs.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_hdcp.c | 47
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 662
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 171
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 270
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.h | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 59
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.c | 24
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt_common.c | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpt_common.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 98
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb_buffer.c | 82
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb_buffer.h | 29
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 368
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.c | 168
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_bo.c | 97
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_bo.h | 26
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fb_pin.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 59
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 112
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev_fb.c | 115
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev_fb.h | 21
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fdi.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 37
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 16
-rw-r--r--  drivers/gpu/drm/i915/display/intel_link_bw.c | 30
-rw-r--r--  drivers/gpu/drm/i915/display/intel_link_bw.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_setup.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_modeset_verify.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 471
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr_regs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_qp_tables.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_snps_phy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 25
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vblank.c | 51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 50
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vrr.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/skl_universal_plane.c | 109
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 31
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 12
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c | 22
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_engine_cs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 46
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_regs.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_execlists_submission.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gsc.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 24
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c | 39
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_mcr.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.h | 38
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 12
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 100
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_sseu.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 96
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_cs.c | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 65
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rps.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_slpc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 79
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc.c | 115
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/fb_decoder.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_cmd_parser.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 112
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_drm_client.c | 108
-rw-r--r--  drivers/gpu/drm/i915/i915_drm_client.h | 42
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 20
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 199
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.h | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_memcpy.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 89
-rw-r--r--  drivers/gpu/drm/i915/i915_params.h | 22
-rw-r--r--  drivers/gpu/drm/i915/i915_perf.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_perf_types.h | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 77
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_sysfs.c | 79
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c | 50
-rw-r--r--  drivers/gpu/drm/i915/intel_gvt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.c | 19
-rw-r--r--  drivers/gpu/drm/i915/intel_memory_region.h | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.c | 243
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 13
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.c | 35
-rw-r--r--  drivers/gpu/drm/i915/intel_wakeref.h | 73
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp.c | 18
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_irq.c | 5
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_session.c | 6
-rw-r--r--  drivers/gpu/drm/i915/pxp/intel_pxp_types.h | 1
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_syncmap.c | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c | 6
-rw-r--r--  drivers/gpu/drm/i915/selftests/intel_uncore.c | 2
-rw-r--r--  drivers/gpu/drm/i915/soc/intel_gmch.c | 27
-rw-r--r--  drivers/gpu/drm/i915/vlv_sideband.c | 29
-rw-r--r--  drivers/gpu/drm/i915/vlv_sideband.h | 9
210 files changed, 6160 insertions, 3312 deletions
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index ce397a8797..3089029abb 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -94,7 +94,7 @@ config DRM_I915_CAPTURE_ERROR
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
Please report any hang for triaging according to:
- https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
+ https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html
If in doubt, say "Y".
@@ -140,7 +140,7 @@ config DRM_I915_GVT_KVMGT
Note that this driver only supports newer device from Broadwell on.
For further information and setup guide, you can visit:
- http://01.org/igvt-g.
+ https://github.com/intel/gvt-linux/wiki.
If in doubt, say "N".
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 2d21930d55..5b71620768 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -24,7 +24,9 @@ config DRM_I915_DEBUG
select DEBUG_FS
select PREEMPT_COUNT
select I2C_CHARDEV
+ select REF_TRACKER
select STACKDEPOT
+ select STACKTRACE
select DRM_DP_AUX_CHARDEV
select X86_MSR # used by igt/pm_rpm
select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
@@ -38,6 +40,7 @@ config DRM_I915_DEBUG
select DRM_I915_DEBUG_GEM_ONCE
select DRM_I915_DEBUG_MMIO
select DRM_I915_DEBUG_RUNTIME_PM
+ select DRM_I915_DEBUG_WAKEREF
select DRM_I915_SW_FENCE_DEBUG_OBJECTS
select DRM_I915_SELFTEST
default n
@@ -231,7 +234,9 @@ config DRM_I915_DEBUG_RUNTIME_PM
bool "Enable extra state checking for runtime PM"
depends on DRM_I915
default n
+ select REF_TRACKER
select STACKDEPOT
+ select STACKTRACE
help
Choose this option to turn on extra state checking for the
runtime PM functionality. This may introduce overhead during
@@ -240,3 +245,16 @@ config DRM_I915_DEBUG_RUNTIME_PM
Recommended for driver developers only.
If in doubt, say "N"
+
+config DRM_I915_DEBUG_WAKEREF
+ bool "Enable extra tracking for wakerefs"
+ depends on DRM_I915
+ select REF_TRACKER
+ select STACKDEPOT
+ select STACKTRACE
+ help
+ Choose this option to turn on extra state checking and usage
+ tracking for the wakerefPM functionality. This may introduce
+ overhead during driver runtime.
+
+ If in doubt, say "N"
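The new DRM_I915_DEBUG_WAKEREF option above selects REF_TRACKER, STACKDEPOT and STACKTRACE so that each wakeref acquisition can be tied back to its caller. As a rough, userspace-only sketch of the cookie-style bookkeeping idea (the names wakeref_get/wakeref_put and the struct below are invented for illustration and are not the kernel's ref_tracker or intel_wakeref API):

/* Hypothetical, userspace-only model of cookie-based wakeref tracking;
 * names are illustrative, not the i915 API. */
#include <stdio.h>
#include <stdlib.h>

struct wakeref_tracker {
	int next_cookie;
	int live;	/* outstanding references */
};

static int wakeref_get(struct wakeref_tracker *t)
{
	t->live++;
	return ++t->next_cookie;	/* cookie identifies this acquisition */
}

static void wakeref_put(struct wakeref_tracker *t, int cookie)
{
	if (cookie <= 0 || cookie > t->next_cookie) {
		fprintf(stderr, "bogus wakeref cookie %d\n", cookie);
		abort();
	}
	t->live--;
}

int main(void)
{
	struct wakeref_tracker t = { 0, 0 };
	int w = wakeref_get(&t);

	wakeref_put(&t, w);
	if (t.live)
		fprintf(stderr, "leaked %d wakeref(s)\n", t.live);
	return 0;
}

The real tracker additionally records a stack trace per cookie via stackdepot, which is what the extra selects provide.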
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 88b2bb0050..2d08baa91d 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -17,7 +17,6 @@ subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
-subdir-ccflags-y += $(call cc-option, -Wstringop-overflow)
subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
# The following turn off the warnings enabled by -Wextra
ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
@@ -34,9 +33,9 @@ endif
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
# Fine grained warnings disable
-CFLAGS_i915_pci.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_display_device.o = $(call cc-disable-warning, override-init)
-CFLAGS_display/intel_fbdev.o = $(call cc-disable-warning, override-init)
+CFLAGS_i915_pci.o = -Wno-override-init
+CFLAGS_display/intel_display_device.o = -Wno-override-init
+CFLAGS_display/intel_fbdev.o = -Wno-override-init
# Support compiling the display code separately for both i915 and xe
# drivers. Define I915 when building i915.
@@ -47,33 +46,34 @@ subdir-ccflags-y += -I$(srctree)/$(src)
# Please keep these build lists sorted!
# core driver code
-i915-y += i915_driver.o \
- i915_drm_client.o \
- i915_config.o \
- i915_getparam.o \
- i915_ioctl.o \
- i915_irq.o \
- i915_mitigations.o \
- i915_module.o \
- i915_params.o \
- i915_pci.o \
- i915_scatterlist.o \
- i915_suspend.o \
- i915_switcheroo.o \
- i915_sysfs.o \
- i915_utils.o \
- intel_clock_gating.o \
- intel_device_info.o \
- intel_memory_region.o \
- intel_pcode.o \
- intel_region_ttm.o \
- intel_runtime_pm.o \
- intel_sbi.o \
- intel_step.o \
- intel_uncore.o \
- intel_wakeref.o \
- vlv_sideband.o \
- vlv_suspend.o
+i915-y += \
+ i915_config.o \
+ i915_driver.o \
+ i915_drm_client.o \
+ i915_getparam.o \
+ i915_ioctl.o \
+ i915_irq.o \
+ i915_mitigations.o \
+ i915_module.o \
+ i915_params.o \
+ i915_pci.o \
+ i915_scatterlist.o \
+ i915_suspend.o \
+ i915_switcheroo.o \
+ i915_sysfs.o \
+ i915_utils.o \
+ intel_clock_gating.o \
+ intel_device_info.o \
+ intel_memory_region.o \
+ intel_pcode.o \
+ intel_region_ttm.o \
+ intel_runtime_pm.o \
+ intel_sbi.o \
+ intel_step.o \
+ intel_uncore.o \
+ intel_wakeref.o \
+ vlv_sideband.o \
+ vlv_suspend.o
# core peripheral code
i915-y += \
@@ -90,13 +90,13 @@ i915-y += \
i915_syncmap.o \
i915_user_extensions.o
-i915-$(CONFIG_COMPAT) += i915_ioc32.o
+i915-$(CONFIG_COMPAT) += \
+ i915_ioc32.o
i915-$(CONFIG_DEBUG_FS) += \
i915_debugfs.o \
- i915_debugfs_params.o \
- display/intel_display_debugfs.o \
- display/intel_pipe_crc.o
-i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
+ i915_debugfs_params.o
+i915-$(CONFIG_PERF_EVENTS) += \
+ i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
gt-y += \
@@ -118,6 +118,7 @@ gt-y += \
gt/intel_ggtt_fencing.o \
gt/intel_gt.o \
gt/intel_gt_buffer_pool.o \
+ gt/intel_gt_ccs_mode.o \
gt/intel_gt_clock_utils.o \
gt/intel_gt_debugfs.o \
gt/intel_gt_engines_debugfs.o \
@@ -153,7 +154,8 @@ gt-y += \
gt/sysfs_engines.o
# x86 intel-gtt module support
-gt-$(CONFIG_X86) += gt/intel_ggtt_gmch.o
+gt-$(CONFIG_X86) += \
+ gt/intel_ggtt_gmch.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -172,9 +174,9 @@ gem-y += \
gem/i915_gem_domain.o \
gem/i915_gem_execbuffer.o \
gem/i915_gem_internal.o \
- gem/i915_gem_object.o \
gem/i915_gem_lmem.o \
gem/i915_gem_mman.o \
+ gem/i915_gem_object.o \
gem/i915_gem_pages.o \
gem/i915_gem_phys.o \
gem/i915_gem_pm.o \
@@ -191,57 +193,61 @@ gem-y += \
gem/i915_gem_wait.o \
gem/i915_gemfs.o
i915-y += \
- $(gem-y) \
- i915_active.o \
- i915_cmd_parser.o \
- i915_deps.o \
- i915_gem_evict.o \
- i915_gem_gtt.o \
- i915_gem_ww.o \
- i915_gem.o \
- i915_query.o \
- i915_request.o \
- i915_scheduler.o \
- i915_trace_points.o \
- i915_ttm_buddy_manager.o \
- i915_vma.o \
- i915_vma_resource.o
+ $(gem-y) \
+ i915_active.o \
+ i915_cmd_parser.o \
+ i915_deps.o \
+ i915_gem.o \
+ i915_gem_evict.o \
+ i915_gem_gtt.o \
+ i915_gem_ww.o \
+ i915_query.o \
+ i915_request.o \
+ i915_scheduler.o \
+ i915_trace_points.o \
+ i915_ttm_buddy_manager.o \
+ i915_vma.o \
+ i915_vma_resource.o
# general-purpose microcontroller (GuC) support
i915-y += \
- gt/uc/intel_gsc_fw.o \
- gt/uc/intel_gsc_proxy.o \
- gt/uc/intel_gsc_uc.o \
- gt/uc/intel_gsc_uc_debugfs.o \
- gt/uc/intel_gsc_uc_heci_cmd_submit.o \
- gt/uc/intel_guc.o \
- gt/uc/intel_guc_ads.o \
- gt/uc/intel_guc_capture.o \
- gt/uc/intel_guc_ct.o \
- gt/uc/intel_guc_debugfs.o \
- gt/uc/intel_guc_fw.o \
- gt/uc/intel_guc_hwconfig.o \
- gt/uc/intel_guc_log.o \
- gt/uc/intel_guc_log_debugfs.o \
- gt/uc/intel_guc_rc.o \
- gt/uc/intel_guc_slpc.o \
- gt/uc/intel_guc_submission.o \
- gt/uc/intel_huc.o \
- gt/uc/intel_huc_debugfs.o \
- gt/uc/intel_huc_fw.o \
- gt/uc/intel_uc.o \
- gt/uc/intel_uc_debugfs.o \
- gt/uc/intel_uc_fw.o
+ gt/uc/intel_gsc_fw.o \
+ gt/uc/intel_gsc_proxy.o \
+ gt/uc/intel_gsc_uc.o \
+ gt/uc/intel_gsc_uc_debugfs.o \
+ gt/uc/intel_gsc_uc_heci_cmd_submit.o\
+ gt/uc/intel_guc.o \
+ gt/uc/intel_guc_ads.o \
+ gt/uc/intel_guc_capture.o \
+ gt/uc/intel_guc_ct.o \
+ gt/uc/intel_guc_debugfs.o \
+ gt/uc/intel_guc_fw.o \
+ gt/uc/intel_guc_hwconfig.o \
+ gt/uc/intel_guc_log.o \
+ gt/uc/intel_guc_log_debugfs.o \
+ gt/uc/intel_guc_rc.o \
+ gt/uc/intel_guc_slpc.o \
+ gt/uc/intel_guc_submission.o \
+ gt/uc/intel_huc.o \
+ gt/uc/intel_huc_debugfs.o \
+ gt/uc/intel_huc_fw.o \
+ gt/uc/intel_uc.o \
+ gt/uc/intel_uc_debugfs.o \
+ gt/uc/intel_uc_fw.o
# graphics system controller (GSC) support
-i915-y += gt/intel_gsc.o
+i915-y += \
+ gt/intel_gsc.o
# graphics hardware monitoring (HWMON) support
-i915-$(CONFIG_HWMON) += i915_hwmon.o
+i915-$(CONFIG_HWMON) += \
+ i915_hwmon.o
# modesetting core code
i915-y += \
display/hsw_ips.o \
+ display/i9xx_plane.o \
+ display/i9xx_wm.o \
display/intel_atomic.o \
display/intel_atomic_plane.o \
display/intel_audio.o \
@@ -257,6 +263,7 @@ i915-y += \
display/intel_display.o \
display/intel_display_driver.o \
display/intel_display_irq.o \
+ display/intel_display_params.o \
display/intel_display_power.o \
display/intel_display_power_map.o \
display/intel_display_power_well.o \
@@ -268,9 +275,12 @@ i915-y += \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
display/intel_dpt.o \
+ display/intel_dpt_common.o \
display/intel_drrs.o \
display/intel_dsb.o \
+ display/intel_dsb_buffer.o \
display/intel_fb.o \
+ display/intel_fb_bo.o \
display/intel_fb_pin.o \
display/intel_fbc.o \
display/intel_fdi.o \
@@ -287,8 +297,8 @@ i915-y += \
display/intel_load_detect.o \
display/intel_lpe_audio.o \
display/intel_modeset_lock.o \
- display/intel_modeset_verify.o \
display/intel_modeset_setup.o \
+ display/intel_modeset_verify.o \
display/intel_overlay.o \
display/intel_pch_display.o \
display/intel_pch_refclk.o \
@@ -302,8 +312,6 @@ i915-y += \
display/intel_vblank.o \
display/intel_vga.o \
display/intel_wm.o \
- display/i9xx_plane.o \
- display/i9xx_wm.o \
display/skl_scaler.o \
display/skl_universal_plane.o \
display/skl_watermark.o
@@ -311,7 +319,12 @@ i915-$(CONFIG_ACPI) += \
display/intel_acpi.o \
display/intel_opregion.o
i915-$(CONFIG_DRM_FBDEV_EMULATION) += \
- display/intel_fbdev.o
+ display/intel_fbdev.o \
+ display/intel_fbdev_fb.o
+i915-$(CONFIG_DEBUG_FS) += \
+ display/intel_display_debugfs.o \
+ display/intel_display_debugfs_params.o \
+ display/intel_pipe_crc.o
# modesetting output/encoder code
i915-y += \
@@ -357,13 +370,14 @@ i915-y += \
display/vlv_dsi.o \
display/vlv_dsi_pll.o
-i915-y += i915_perf.o
+i915-y += \
+ i915_perf.o
# Protected execution platform (PXP) support. Base support is required for HuC
i915-y += \
pxp/intel_pxp.o \
- pxp/intel_pxp_tee.o \
- pxp/intel_pxp_huc.o
+ pxp/intel_pxp_huc.o \
+ pxp/intel_pxp_tee.o
i915-$(CONFIG_DRM_I915_PXP) += \
pxp/intel_pxp_cmd.o \
@@ -374,11 +388,11 @@ i915-$(CONFIG_DRM_I915_PXP) += \
pxp/intel_pxp_session.o
# Post-mortem debug and GPU hang state capture
-i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
+i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += \
+ i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
gem/selftests/i915_gem_client_blt.o \
gem/selftests/igt_gem_utils.o \
- selftests/intel_scheduler_helpers.o \
selftests/i915_random.o \
selftests/i915_selftest.o \
selftests/igt_atomic.o \
@@ -387,10 +401,12 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
selftests/igt_mmap.o \
selftests/igt_reset.o \
selftests/igt_spinner.o \
+ selftests/intel_scheduler_helpers.o \
selftests/librapl.o
# virtual gpu code
-i915-y += i915_vgpu.o
+i915-y += \
+ i915_vgpu.o
i915-$(CONFIG_DRM_I915_GVT) += \
intel_gvt.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index e8ee0a0894..06ec04e667 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -432,7 +432,7 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
- intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_dp->DP &= ~DP_PORT_EN;
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
@@ -475,6 +475,40 @@ intel_dp_link_down(struct intel_encoder *encoder,
}
}
+static void g4x_dp_audio_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!crtc_state->has_audio)
+ return;
+
+ /* Enable audio presence detect */
+ intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
+ intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
+}
+
+static void g4x_dp_audio_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (!old_crtc_state->has_audio)
+ return;
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+
+ /* Disable audio presence detect */
+ intel_dp->DP &= ~DP_AUDIO_OUTPUT_ENABLE;
+ intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+}
+
static void intel_disable_dp(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -484,8 +518,6 @@ static void intel_disable_dp(struct intel_atomic_state *state,
intel_dp->link_trained = false;
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
/*
* Make sure the panel is off before trying to change the mode.
* But also ensure that we have vdd while we switch off the panel.
@@ -631,8 +663,6 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
* fail when the power sequencer is freshly used for this port.
*/
intel_dp->DP |= DP_PORT_EN;
- if (crtc_state->has_audio)
- intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
@@ -686,7 +716,6 @@ static void g4x_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
intel_enable_dp(state, encoder, pipe_config, conn_state);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
@@ -695,7 +724,6 @@ static void vlv_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
intel_edp_backlight_on(pipe_config, conn_state);
}
@@ -1325,6 +1353,8 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->disable = g4x_disable_dp;
intel_encoder->post_disable = g4x_post_disable_dp;
}
+ intel_encoder->audio_enable = g4x_dp_audio_enable;
+ intel_encoder->audio_disable = g4x_dp_audio_disable;
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A))
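The g4x_dp.c changes stop toggling DP_AUDIO_OUTPUT_ENABLE and calling the audio codec helpers from the port enable/disable paths, and route them through the new encoder audio_enable/audio_disable hooks instead. A minimal standalone sketch of that split, with invented types and names (struct encoder, port_enable, audio_enable), guarded by the same has_audio check used above:

#include <stdbool.h>
#include <stdio.h>

struct encoder {
	void (*enable)(struct encoder *e, bool has_audio);
	void (*audio_enable)(struct encoder *e, bool has_audio);
};

static void port_enable(struct encoder *e, bool has_audio)
{
	(void)e;
	(void)has_audio;
	printf("port enabled (no audio bits touched here any more)\n");
}

static void audio_enable(struct encoder *e, bool has_audio)
{
	(void)e;
	if (!has_audio)		/* the hook is a no-op without an audio sink */
		return;
	printf("audio presence detect + codec enabled\n");
}

int main(void)
{
	struct encoder e = { port_enable, audio_enable };

	e.enable(&e, true);
	e.audio_enable(&e, true);	/* called separately by the modeset sequence */
	return 0;
}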
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 45e044b4a8..8096492b3f 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -228,25 +228,51 @@ static void g4x_hdmi_enable_port(struct intel_encoder *encoder,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
+static void g4x_hdmi_audio_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
+
+ if (!crtc_state->has_audio)
+ return;
+
+ drm_WARN_ON(&i915->drm, !crtc_state->has_hdmi_sink);
+
+ /* Enable audio presence detect */
+ intel_de_rmw(i915, hdmi->hdmi_reg, 0, HDMI_AUDIO_ENABLE);
+
+ intel_audio_codec_enable(encoder, crtc_state, conn_state);
+}
+
+static void g4x_hdmi_audio_disable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_hdmi *hdmi = enc_to_intel_hdmi(encoder);
+
+ if (!old_crtc_state->has_audio)
+ return;
+
+ intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+
+ /* Disable audio presence detect */
+ intel_de_rmw(i915, hdmi->hdmi_reg, HDMI_AUDIO_ENABLE, 0);
+}
+
static void g4x_enable_hdmi(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
g4x_hdmi_enable_port(encoder, pipe_config);
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void ibx_enable_hdmi(struct intel_atomic_state *state,
@@ -262,8 +288,6 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
/*
* HW workaround, need to write this twice for issue
@@ -296,10 +320,6 @@ static void ibx_enable_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void cpt_enable_hdmi(struct intel_atomic_state *state,
@@ -317,8 +337,6 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
- if (pipe_config->has_audio)
- temp |= HDMI_AUDIO_ENABLE;
/*
* WaEnableHDMI8bpcBefore12bpc:snb,ivb
@@ -351,10 +369,6 @@ static void cpt_enable_hdmi(struct intel_atomic_state *state,
intel_de_rmw(dev_priv, TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE, 0);
}
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void vlv_enable_hdmi(struct intel_atomic_state *state,
@@ -362,11 +376,6 @@ static void vlv_enable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
-
- drm_WARN_ON(&dev_priv->drm, pipe_config->has_audio &&
- !pipe_config->has_hdmi_sink);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
static void intel_disable_hdmi(struct intel_atomic_state *state,
@@ -384,7 +393,7 @@ static void intel_disable_hdmi(struct intel_atomic_state *state,
temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
- temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
+ temp &= ~SDVO_ENABLE;
intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
@@ -433,8 +442,6 @@ static void g4x_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
intel_disable_hdmi(state, encoder, old_crtc_state, old_conn_state);
}
@@ -443,7 +450,6 @@ static void pch_disable_hdmi(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
}
static void pch_post_disable_hdmi(struct intel_atomic_state *state,
@@ -750,6 +756,8 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
else
intel_encoder->enable = g4x_enable_hdmi;
}
+ intel_encoder->audio_enable = g4x_hdmi_audio_enable;
+ intel_encoder->audio_disable = g4x_hdmi_audio_disable;
intel_encoder->shutdown = intel_hdmi_encoder_shutdown;
intel_encoder->type = INTEL_OUTPUT_HDMI;
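The new g4x_hdmi_audio_enable()/g4x_hdmi_audio_disable() use intel_de_rmw() to set and clear HDMI_AUDIO_ENABLE rather than rewriting a cached register value. A self-contained model of that read-modify-write pattern on a plain variable (the bit position below is illustrative, not the real register layout):

#include <stdint.h>
#include <stdio.h>

#define HDMI_AUDIO_ENABLE (1u << 6)	/* bit position is illustrative only */

static uint32_t fake_reg;

static uint32_t rmw(uint32_t *reg, uint32_t clear, uint32_t set)
{
	uint32_t old = *reg;

	*reg = (old & ~clear) | set;	/* read, modify, write back */
	return old;
}

int main(void)
{
	rmw(&fake_reg, 0, HDMI_AUDIO_ENABLE);		/* audio enable */
	printf("after enable:  0x%08x\n", fake_reg);
	rmw(&fake_reg, HDMI_AUDIO_ENABLE, 0);		/* audio disable */
	printf("after disable: 0x%08x\n", fake_reg);
	return 0;
}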
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 7dc38ac020..611a7d6ef8 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -193,7 +193,7 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
if (!hsw_crtc_supports_ips(crtc))
return false;
- if (!i915->params.enable_ips)
+ if (!i915->display.params.enable_ips)
return false;
if (crtc_state->pipe_bpp > 24)
@@ -329,7 +329,7 @@ static int hsw_ips_debugfs_status_show(struct seq_file *m, void *unused)
wakeref = intel_runtime_pm_get(&i915->runtime_pm);
seq_printf(m, "Enabled by kernel parameter: %s\n",
- str_yes_no(i915->params.enable_ips));
+ str_yes_no(i915->display.params.enable_ips));
if (DISPLAY_VER(i915) >= 8) {
seq_puts(m, "Currently: unknown\n");
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index af0c79a4c9..11ca9572e8 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -608,7 +608,7 @@ static bool intel_crtc_active(struct intel_crtc *crtc)
* crtc->state->active once we have proper CRTC states wired up
* for atomic.
*/
- return crtc && crtc->active && crtc->base.primary->state->fb &&
+ return crtc->active && crtc->base.primary->state->fb &&
crtc->config->hw.adjusted_mode.crtc_clock;
}
@@ -2477,7 +2477,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv,
* FIFO size is only half of the self
* refresh FIFO size on ILK/SNB.
*/
- if (DISPLAY_VER(dev_priv) <= 6)
+ if (DISPLAY_VER(dev_priv) < 7)
fifo_size /= 2;
}
@@ -2818,7 +2818,7 @@ static int ilk_compute_pipe_wm(struct intel_atomic_state *state,
usable_level = dev_priv->display.wm.num_levels - 1;
/* ILK/SNB: LP2+ watermarks only w/o sprites */
- if (DISPLAY_VER(dev_priv) <= 6 && pipe_wm->sprites_enabled)
+ if (DISPLAY_VER(dev_priv) < 7 && pipe_wm->sprites_enabled)
usable_level = 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
@@ -2961,7 +2961,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
int last_enabled_level = num_levels - 1;
/* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
- if ((DISPLAY_VER(dev_priv) <= 6 || IS_IVYBRIDGE(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) < 7 || IS_IVYBRIDGE(dev_priv)) &&
config->num_pipes_active > 1)
last_enabled_level = 0;
@@ -2993,7 +2993,7 @@ static void ilk_wm_merge(struct drm_i915_private *dev_priv,
/* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
if (DISPLAY_VER(dev_priv) == 5 && HAS_FBC(dev_priv) &&
- dev_priv->params.enable_fbc && !merged->fbc_wm_enabled) {
+ dev_priv->display.params.enable_fbc && !merged->fbc_wm_enabled) {
for (level = 2; level < num_levels; level++) {
struct intel_wm_level *wm = &merged->wm[level];
@@ -3060,7 +3060,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* Always set WM_LP_SPRITE_EN when spr_val != 0, even if the
* level is disabled. Doing otherwise could cause underruns.
*/
- if (DISPLAY_VER(dev_priv) <= 6 && r->spr_val) {
+ if (DISPLAY_VER(dev_priv) < 7 && r->spr_val) {
drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] |= WM_LP_SPRITE_ENABLE;
}
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index 67143a0f51..ac456a2275 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -330,7 +330,7 @@ static int afe_clk(struct intel_encoder *encoder,
int bpp;
if (crtc_state->dsc.compression_enable)
- bpp = crtc_state->dsc.compressed_bpp;
+ bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
else
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -860,7 +860,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* compressed and non-compressed bpp.
*/
if (crtc_state->dsc.compression_enable) {
- mul = crtc_state->dsc.compressed_bpp;
+ mul = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
div = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
}
@@ -884,7 +884,7 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
int bpp, line_time_us, byte_clk_period_ns;
if (crtc_state->dsc.compression_enable)
- bpp = crtc_state->dsc.compressed_bpp;
+ bpp = to_bpp_int(crtc_state->dsc.compressed_bpp_x16);
else
bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1458,8 +1458,8 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
- if (pipe_config->dsc.compressed_bpp) {
- int div = pipe_config->dsc.compressed_bpp;
+ if (pipe_config->dsc.compressed_bpp_x16) {
+ int div = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
int mul = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
adjusted_mode->crtc_htotal =
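Throughout this series dsc.compressed_bpp is replaced by dsc.compressed_bpp_x16, i.e. bits per pixel in .4 binary fixed point, and call sites convert back with to_bpp_int()/to_bpp_int_roundup(). A standalone sketch of what those conversions are assumed to do (helper names copied from the diff, bodies reconstructed here for illustration):

#include <stdio.h>

static int to_bpp_x16(int bpp)             { return bpp << 4; }          /* integer bpp -> .4 fixed point */
static int to_bpp_int(int bpp_x16)         { return bpp_x16 >> 4; }      /* truncate the fraction */
static int to_bpp_int_roundup(int bpp_x16) { return (bpp_x16 + 0xf) >> 4; }

int main(void)
{
	int bpp_x16 = to_bpp_x16(12) + 8;	/* 12.5 bpp */

	printf("%d.%04d bpp -> int %d, roundup %d\n",
	       bpp_x16 >> 4, (bpp_x16 & 0xf) * 625,
	       to_bpp_int(bpp_x16), to_bpp_int_roundup(bpp_x16));
	return 0;
}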
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 5d18145da2..ec0d5168b5 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -331,9 +331,6 @@ void intel_atomic_state_free(struct drm_atomic_state *_state)
drm_atomic_state_default_release(&state->base);
kfree(state->global_objs);
-
- i915_sw_fence_fini(&state->commit_ready);
-
kfree(state);
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index b107435061..06c2455bdd 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -31,7 +31,10 @@
* prepare/check/commit/cleanup steps.
*/
+#include <linux/dma-fence-chain.h>
+
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
@@ -1012,6 +1015,41 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
return 0;
}
+static int add_dma_resv_fences(struct dma_resv *resv,
+ struct drm_plane_state *new_plane_state)
+{
+ struct dma_fence *fence = dma_fence_get(new_plane_state->fence);
+ struct dma_fence *new;
+ int ret;
+
+ ret = dma_resv_get_singleton(resv, dma_resv_usage_rw(false), &new);
+ if (ret)
+ goto error;
+
+ if (new && fence) {
+ struct dma_fence_chain *chain = dma_fence_chain_alloc();
+
+ if (!chain) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ dma_fence_chain_init(chain, fence, new, 1);
+ fence = &chain->base;
+
+ } else if (new) {
+ fence = new;
+ }
+
+ dma_fence_put(new_plane_state->fence);
+ new_plane_state->fence = fence;
+ return 0;
+
+error:
+ dma_fence_put(fence);
+ return ret;
+}
+
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
* @_plane: drm plane to prepare for
@@ -1035,7 +1073,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
struct intel_atomic_state *state =
to_intel_atomic_state(new_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct intel_plane_state *old_plane_state =
+ struct intel_plane_state *old_plane_state =
intel_atomic_get_old_plane_state(state, plane);
struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
@@ -1058,55 +1096,28 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
* can safely continue.
*/
if (new_crtc_state && intel_crtc_needs_modeset(new_crtc_state)) {
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- old_obj->base.resv,
- false, 0,
- GFP_KERNEL);
+ ret = add_dma_resv_fences(intel_bo_to_drm_bo(old_obj)->resv,
+ &new_plane_state->uapi);
if (ret < 0)
return ret;
}
}
- if (new_plane_state->uapi.fence) { /* explicit fencing */
- i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
- &attr);
- ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
- new_plane_state->uapi.fence,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
if (!obj)
return 0;
-
ret = intel_plane_pin_fb(new_plane_state);
if (ret)
return ret;
- i915_gem_object_wait_priority(obj, 0, &attr);
+ ret = drm_gem_plane_helper_prepare_fb(&plane->base, &new_plane_state->uapi);
+ if (ret < 0)
+ goto unpin_fb;
- if (!new_plane_state->uapi.fence) { /* implicit fencing */
- struct dma_resv_iter cursor;
- struct dma_fence *fence;
-
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- obj->base.resv, false,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- goto unpin_fb;
+ if (new_plane_state->uapi.fence) {
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
- dma_resv_iter_begin(&cursor, obj->base.resv,
- DMA_RESV_USAGE_WRITE);
- dma_resv_for_each_fence_unlocked(&cursor, fence) {
- intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
- }
- dma_resv_iter_end(&cursor);
- } else {
intel_display_rps_boost_after_vblank(new_plane_state->hw.crtc,
new_plane_state->uapi.fence);
}
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 19605264a3..07e0c73204 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -25,6 +25,7 @@
#include <linux/kernel.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include <drm/i915_component.h>
#include "i915_drv.h"
@@ -521,25 +522,25 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
unsigned int link_clks_available, link_clks_required;
unsigned int tu_data, tu_line, link_clks_active;
unsigned int h_active, h_total, hblank_delta, pixel_clk;
- unsigned int fec_coeff, cdclk, vdsc_bpp;
+ unsigned int fec_coeff, cdclk, vdsc_bppx16;
unsigned int link_clk, lanes;
unsigned int hblank_rise;
h_active = crtc_state->hw.adjusted_mode.crtc_hdisplay;
h_total = crtc_state->hw.adjusted_mode.crtc_htotal;
pixel_clk = crtc_state->hw.adjusted_mode.crtc_clock;
- vdsc_bpp = crtc_state->dsc.compressed_bpp;
+ vdsc_bppx16 = crtc_state->dsc.compressed_bpp_x16;
cdclk = i915->display.cdclk.hw.cdclk;
/* fec= 0.972261, using rounding multiplier of 1000000 */
fec_coeff = 972261;
link_clk = crtc_state->port_clock;
lanes = crtc_state->lane_count;
- drm_dbg_kms(&i915->drm, "h_active = %u link_clk = %u :"
- "lanes = %u vdsc_bpp = %u cdclk = %u\n",
- h_active, link_clk, lanes, vdsc_bpp, cdclk);
+ drm_dbg_kms(&i915->drm,
+ "h_active = %u link_clk = %u : lanes = %u vdsc_bpp = " BPP_X16_FMT " cdclk = %u\n",
+ h_active, link_clk, lanes, BPP_X16_ARGS(vdsc_bppx16), cdclk);
- if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bpp || !cdclk))
+ if (WARN_ON(!link_clk || !pixel_clk || !lanes || !vdsc_bppx16 || !cdclk))
return 0;
link_clks_available = (h_total - h_active) * link_clk / pixel_clk - 28;
@@ -551,8 +552,8 @@ static unsigned int calc_hblank_early_prog(struct intel_encoder *encoder,
hblank_delta = DIV64_U64_ROUND_UP(mul_u32_u32(5 * (link_clk + cdclk), pixel_clk),
mul_u32_u32(link_clk, cdclk));
- tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bpp * 8, 1000000),
- mul_u32_u32(link_clk * lanes, fec_coeff));
+ tu_data = div64_u64(mul_u32_u32(pixel_clk * vdsc_bppx16 * 8, 1000000),
+ mul_u32_u32(link_clk * lanes * 16, fec_coeff));
tu_line = div64_u64(h_active * mul_u32_u32(link_clk, fec_coeff),
mul_u32_u32(64 * pixel_clk, 1000000));
link_clks_active = (tu_line - 1) * 64 + tu_data;
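With compressed_bpp_x16 the tu_data computation gains a factor of 16 in the denominator to cancel the fixed-point scaling, so the result is unchanged for integer bpp values. A quick standalone check with made-up link parameters:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t pixel_clk = 594000, link_clk = 810000, lanes = 4;
	uint64_t fec_coeff = 972261;		/* 0.972261 scaled by 1e6 */
	uint64_t bpp = 12, bpp_x16 = bpp * 16;

	/* old formula with integer bpp vs new formula with bpp_x16 and the
	 * extra 16 in the denominator */
	uint64_t old_way = (pixel_clk * bpp * 8 * 1000000ULL) /
			   (link_clk * lanes * fec_coeff);
	uint64_t new_way = (pixel_clk * bpp_x16 * 8 * 1000000ULL) /
			   (link_clk * lanes * 16 * fec_coeff);

	printf("tu_data old=%" PRIu64 " new=%" PRIu64 "\n", old_way, new_way);
	return 0;
}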
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 2e8f17c045..3f3cd944a1 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -88,10 +88,10 @@ u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
drm_WARN_ON(&i915->drm, panel->backlight.pwm_level_max == 0);
- if (i915->params.invert_brightness < 0)
+ if (i915->display.params.invert_brightness < 0)
return val;
- if (i915->params.invert_brightness > 0 ||
+ if (i915->display.params.invert_brightness > 0 ||
intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)) {
return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
}
@@ -132,8 +132,9 @@ u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
drm_WARN_ON_ONCE(&i915->drm,
panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
- if (i915->params.invert_brightness > 0 ||
- (i915->params.invert_brightness == 0 && intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
+ if (i915->display.params.invert_brightness > 0 ||
+ (i915->display.params.invert_brightness == 0 &&
+ intel_has_quirk(i915, QUIRK_INVERT_BRIGHTNESS)))
val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
@@ -274,7 +275,7 @@ static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state,
struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
}
static void
@@ -427,7 +428,7 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn
intel_backlight_set_pwm_level(old_conn_state, level);
panel->backlight.pwm_state.enabled = false;
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
}
void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
@@ -749,7 +750,7 @@ static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
panel->backlight.pwm_state.enabled = true;
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+ pwm_apply_might_sleep(panel->backlight.pwm, &panel->backlight.pwm_state);
}
static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
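The invert-brightness handling (now read from i915->display.params) mirrors a PWM value around the [pwm_level_min, pwm_level_max] range. A standalone check that the mapping used above is its own inverse, with arbitrary example limits:

#include <stdio.h>

static unsigned int invert(unsigned int val, unsigned int min, unsigned int max)
{
	return max - val + min;		/* mirror val inside [min, max] */
}

int main(void)
{
	unsigned int min = 25, max = 1000, val = 300;
	unsigned int inv = invert(val, min, max);

	printf("val=%u -> inverted=%u -> back=%u\n",
	       val, inv, invert(inv, min, max));
	return 0;
}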
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index e7ea287845..eb9835d48d 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -1116,7 +1116,7 @@ parse_sdvo_panel_data(struct drm_i915_private *i915,
struct drm_display_mode *panel_fixed_mode;
int index;
- index = i915->params.vbt_sdvo_panel_type;
+ index = i915->display.params.vbt_sdvo_panel_type;
if (index == -2) {
drm_dbg_kms(&i915->drm,
"Ignore SDVO panel mode from BIOS VBT tables.\n");
@@ -1514,9 +1514,9 @@ parse_edp(struct drm_i915_private *i915,
u8 vswing;
/* Don't read from VBT if module parameter has valid value*/
- if (i915->params.edp_vswing) {
+ if (i915->display.params.edp_vswing) {
panel->vbt.edp.low_vswing =
- i915->params.edp_vswing == 1;
+ i915->display.params.edp_vswing == 1;
} else {
vswing = (edp->edp_vswing_preemph >> (panel_type * 4)) & 0xF;
panel->vbt.edp.low_vswing = vswing == 0;
@@ -2232,6 +2232,9 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
const u8 *ddc_pin_map;
int i, n_entries;
+ if (IS_DGFX(i915))
+ return vbt_pin;
+
if (INTEL_PCH_TYPE(i915) >= PCH_LNL || HAS_PCH_MTP(i915) ||
IS_ALDERLAKE_P(i915)) {
ddc_pin_map = adlp_ddc_pin_map;
@@ -2239,8 +2242,6 @@ static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
} else if (IS_ALDERLAKE_S(i915)) {
ddc_pin_map = adls_ddc_pin_map;
n_entries = ARRAY_SIZE(adls_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
- return vbt_pin;
} else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
@@ -2504,6 +2505,27 @@ static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
devdata->child.device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
}
+static void sanitize_hdmi_level_shift(struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+
+ if (!intel_bios_encoder_supports_dvi(devdata))
+ return;
+
+ /*
+ * Some BDW machines (eg. HP Pavilion 15-ab) shipped
+ * with a HSW VBT where the level shifter value goes
+ * up to 11, whereas the BDW max is 9.
+ */
+ if (IS_BROADWELL(i915) && devdata->child.hdmi_level_shifter_value > 9) {
+ drm_dbg_kms(&i915->drm, "Bogus port %c VBT HDMI level shift %d, adjusting to %d\n",
+ port_name(port), devdata->child.hdmi_level_shifter_value, 9);
+
+ devdata->child.hdmi_level_shifter_value = 9;
+ }
+}
+
static bool
intel_bios_encoder_supports_crt(const struct intel_bios_encoder_data *devdata)
{
@@ -2683,6 +2705,7 @@ static void parse_ddi_port(struct intel_bios_encoder_data *devdata)
}
sanitize_device_type(devdata, port);
+ sanitize_hdmi_level_shift(devdata, port);
}
static bool has_ddi_port_info(struct drm_i915_private *i915)
@@ -3426,8 +3449,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
crtc_state->pipe_bpp = bpc * 3;
- crtc_state->dsc.compressed_bpp = min(crtc_state->pipe_bpp,
- VBT_DSC_MAX_BPP(dsc->max_bpp));
+ crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(min(crtc_state->pipe_bpp,
+ VBT_DSC_MAX_BPP(dsc->max_bpp)));
/*
* FIXME: This is ugly, and slice count should take DSC engine
@@ -3486,8 +3509,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
if (!devdata->dsc)
return false;
- if (crtc_state)
- fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
+ fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
return true;
}
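Earlier in this file's hunks, map_ddc_pin() now returns the VBT DDC pin unchanged for all discrete GPUs before consulting the per-PCH translation tables. A self-contained sketch of that lookup shape (the table contents and the is_dgfx flag below are placeholders, not real platform data):

#include <stddef.h>
#include <stdio.h>

static const unsigned char example_pin_map[] = { 0, 1, 2, 4, 5, 3 };

static unsigned int map_ddc_pin(int is_dgfx, unsigned char vbt_pin)
{
	if (is_dgfx)				/* discrete: VBT pin used as-is */
		return vbt_pin;

	/* integrated: translate through the platform table, bounds-checked */
	if (vbt_pin < sizeof(example_pin_map) && example_pin_map[vbt_pin])
		return example_pin_map[vbt_pin];

	return 0;				/* unknown pin: report invalid */
}

int main(void)
{
	printf("dgfx pin 3 -> %u\n", map_ddc_pin(1, 3));
	printf("igfx pin 3 -> %u\n", map_ddc_pin(0, 3));
	printf("igfx pin 9 -> %u\n", map_ddc_pin(0, 9));
	return 0;
}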
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index bef96db62c..7f2a50b4f4 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -87,7 +87,8 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
return ret;
dclk = val & 0xffff;
- sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) > 11 ? 500 : 0), 1000);
+ sp->dclk = DIV_ROUND_UP((16667 * dclk) + (DISPLAY_VER(dev_priv) >= 12 ? 500 : 0),
+ 1000);
sp->t_rp = (val & 0xff0000) >> 16;
sp->t_rcd = (val & 0xff000000) >> 24;
@@ -480,7 +481,7 @@ static int tgl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
if (num_channels < qi.max_numchannels && DISPLAY_VER(dev_priv) >= 12)
qi.deinterleave = max(DIV_ROUND_UP(qi.deinterleave, 2), 1);
- if (DISPLAY_VER(dev_priv) > 11 && num_channels > qi.max_numchannels)
+ if (DISPLAY_VER(dev_priv) >= 12 && num_channels > qi.max_numchannels)
drm_warn(&dev_priv->drm, "Number of channels exceeds max number of channels.");
if (qi.max_numchannels != 0)
num_channels = min_t(u8, num_channels, qi.max_numchannels);
@@ -897,7 +898,7 @@ static int icl_find_qgv_points(struct drm_i915_private *i915,
unsigned int idx;
unsigned int max_data_rate;
- if (DISPLAY_VER(i915) > 11)
+ if (DISPLAY_VER(i915) >= 12)
idx = tgl_max_bw_index(i915, num_active_planes, i);
else
idx = icl_max_bw_index(i915, num_active_planes, i);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index c4839c67cb..7ba30d26f6 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -1180,7 +1180,7 @@ sanitize:
/* force cdclk programming */
dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->display.cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = ~0;
}
static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -1446,50 +1446,77 @@ static u8 bxt_calc_voltage_level(int cdclk)
return DIV_ROUND_UP(cdclk, 25000);
}
+static u8 calc_voltage_level(int cdclk, int num_voltage_levels,
+ const int voltage_level_max_cdclk[])
+{
+ int voltage_level;
+
+ for (voltage_level = 0; voltage_level < num_voltage_levels; voltage_level++) {
+ if (cdclk <= voltage_level_max_cdclk[voltage_level])
+ return voltage_level;
+ }
+
+ MISSING_CASE(cdclk);
+ return num_voltage_levels - 1;
+}
+
static u8 icl_calc_voltage_level(int cdclk)
{
- if (cdclk > 556800)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ static const int icl_voltage_level_max_cdclk[] = {
+ [0] = 312000,
+ [1] = 556800,
+ [2] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(icl_voltage_level_max_cdclk),
+ icl_voltage_level_max_cdclk);
}
static u8 ehl_calc_voltage_level(int cdclk)
{
- if (cdclk > 326400)
- return 3;
- else if (cdclk > 312000)
- return 2;
- else if (cdclk > 180000)
- return 1;
- else
- return 0;
+ static const int ehl_voltage_level_max_cdclk[] = {
+ [0] = 180000,
+ [1] = 312000,
+ [2] = 326400,
+ /*
+ * Bspec lists the limit as 556.8 MHz, but some JSL
+ * development boards (at least) boot with 652.8 MHz
+ */
+ [3] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(ehl_voltage_level_max_cdclk),
+ ehl_voltage_level_max_cdclk);
}
static u8 tgl_calc_voltage_level(int cdclk)
{
- if (cdclk > 556800)
- return 3;
- else if (cdclk > 326400)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ static const int tgl_voltage_level_max_cdclk[] = {
+ [0] = 312000,
+ [1] = 326400,
+ [2] = 556800,
+ [3] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(tgl_voltage_level_max_cdclk),
+ tgl_voltage_level_max_cdclk);
}
static u8 rplu_calc_voltage_level(int cdclk)
{
- if (cdclk > 556800)
- return 3;
- else if (cdclk > 480000)
- return 2;
- else if (cdclk > 312000)
- return 1;
- else
- return 0;
+ static const int rplu_voltage_level_max_cdclk[] = {
+ [0] = 312000,
+ [1] = 480000,
+ [2] = 556800,
+ [3] = 652800,
+ };
+
+ return calc_voltage_level(cdclk,
+ ARRAY_SIZE(rplu_voltage_level_max_cdclk),
+ rplu_voltage_level_max_cdclk);
}
static void icl_readout_refclk(struct drm_i915_private *dev_priv,
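The per-platform voltage ladders are now data tables, and calc_voltage_level() returns the first level whose maximum cdclk is at or above the requested frequency, falling back to the highest level. A standalone check using the ICL ladder from the hunk above:

#include <stdio.h>

static int calc_voltage_level(int cdclk, int n, const int max_cdclk[])
{
	int level;

	for (level = 0; level < n; level++)
		if (cdclk <= max_cdclk[level])	/* first level that can carry cdclk */
			return level;
	return n - 1;				/* out of range: report the highest level */
}

int main(void)
{
	static const int icl_max_cdclk[] = { 312000, 556800, 652800 };
	int cdclks[] = { 180000, 312000, 324000, 556800, 652800 };
	unsigned int i;

	for (i = 0; i < sizeof(cdclks) / sizeof(cdclks[0]); i++)
		printf("cdclk %d kHz -> level %d\n", cdclks[i],
		       calc_voltage_level(cdclks[i], 3, icl_max_cdclk));
	return 0;
}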
@@ -1800,6 +1827,8 @@ static bool cdclk_pll_is_unknown(unsigned int vco)
return vco == ~0;
}
+static const int cdclk_squash_len = 16;
+
static int cdclk_squash_divider(u16 waveform)
{
return hweight16(waveform ?: 0xffff);
@@ -1811,7 +1840,6 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
struct intel_cdclk_config *mid_cdclk_config)
{
u16 old_waveform, new_waveform, mid_waveform;
- int size = 16;
int div = 2;
/* Return if PLL is in an unknown state, force a complete disable and re-enable. */
@@ -1850,7 +1878,8 @@ static bool cdclk_compute_crawl_and_squash_midpoint(struct drm_i915_private *i91
}
mid_cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_squash_divider(mid_waveform) *
- mid_cdclk_config->vco, size * div);
+ mid_cdclk_config->vco,
+ cdclk_squash_len * div);
/* make sure the mid clock came out sane */
@@ -1878,9 +1907,9 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
{
int cdclk = cdclk_config->cdclk;
int vco = cdclk_config->vco;
- u32 val;
+ int unsquashed_cdclk;
u16 waveform;
- int clock;
+ u32 val;
if (HAS_CDCLK_CRAWL(dev_priv) && dev_priv->display.cdclk.hw.vco > 0 && vco > 0 &&
!cdclk_pll_is_unknown(dev_priv->display.cdclk.hw.vco)) {
@@ -1897,15 +1926,13 @@ static void _bxt_set_cdclk(struct drm_i915_private *dev_priv,
waveform = cdclk_squash_waveform(dev_priv, cdclk);
- if (waveform)
- clock = vco / 2;
- else
- clock = cdclk;
+ unsquashed_cdclk = DIV_ROUND_CLOSEST(cdclk * cdclk_squash_len,
+ cdclk_squash_divider(waveform));
if (HAS_CDCLK_SQUASH(dev_priv))
dg2_cdclk_squash_program(dev_priv, waveform);
- val = bxt_cdclk_cd2x_div_sel(dev_priv, clock, vco) |
+ val = bxt_cdclk_cd2x_div_sel(dev_priv, unsquashed_cdclk, vco) |
bxt_cdclk_cd2x_pipe(dev_priv, pipe);
/*
@@ -2075,7 +2102,7 @@ sanitize:
dev_priv->display.cdclk.hw.cdclk = 0;
/* force full PLL disable + enable */
- dev_priv->display.cdclk.hw.vco = -1;
+ dev_priv->display.cdclk.hw.vco = ~0;
}
static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
@@ -2485,7 +2512,8 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ struct intel_cdclk_config cdclk_config;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2494,12 +2522,25 @@ intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_pre_notify(state);
- if (pipe == INVALID_PIPE ||
- old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (new_cdclk_state->disable_pipes) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ } else {
+ if (new_cdclk_state->actual.cdclk >= old_cdclk_state->actual.cdclk) {
+ cdclk_config = new_cdclk_state->actual;
+ pipe = new_cdclk_state->pipe;
+ } else {
+ cdclk_config = old_cdclk_state->actual;
+ pipe = INVALID_PIPE;
+ }
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
+ cdclk_config.voltage_level = max(new_cdclk_state->actual.voltage_level,
+ old_cdclk_state->actual.voltage_level);
}
+
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &cdclk_config, pipe);
}
/**
@@ -2517,7 +2558,7 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
intel_atomic_get_old_cdclk_state(state);
const struct intel_cdclk_state *new_cdclk_state =
intel_atomic_get_new_cdclk_state(state);
- enum pipe pipe = new_cdclk_state->pipe;
+ enum pipe pipe;
if (!intel_cdclk_changed(&old_cdclk_state->actual,
&new_cdclk_state->actual))
@@ -2526,12 +2567,15 @@ intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
if (IS_DG2(i915))
intel_cdclk_pcode_post_notify(state);
- if (pipe != INVALID_PIPE &&
- old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
- drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+ if (!new_cdclk_state->disable_pipes &&
+ new_cdclk_state->actual.cdclk < old_cdclk_state->actual.cdclk)
+ pipe = new_cdclk_state->pipe;
+ else
+ pipe = INVALID_PIPE;
- intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
- }
+ drm_WARN_ON(&i915->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(i915, &new_cdclk_state->actual, pipe);
}
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -2597,9 +2641,10 @@ static int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
* Since PPC = 2 with bigjoiner
* => CDCLK >= compressed_bpp * Pixel clock / 2 * Bigjoiner Interface bits
*/
- int bigjoiner_interface_bits = DISPLAY_VER(i915) > 13 ? 36 : 24;
- int min_cdclk_bj = (crtc_state->dsc.compressed_bpp * pixel_clock) /
- (2 * bigjoiner_interface_bits);
+ int bigjoiner_interface_bits = DISPLAY_VER(i915) >= 14 ? 36 : 24;
+ int min_cdclk_bj =
+ (to_bpp_int_roundup(crtc_state->dsc.compressed_bpp_x16) *
+ pixel_clock) / (2 * bigjoiner_interface_bits);
min_cdclk = max(min_cdclk, min_cdclk_bj);
}
@@ -3008,6 +3053,7 @@ static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_globa
return NULL;
cdclk_state->pipe = INVALID_PIPE;
+ cdclk_state->disable_pipes = false;
return &cdclk_state->base;
}
@@ -3186,6 +3232,8 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
if (ret)
return ret;
+ new_cdclk_state->disable_pipes = true;
+
drm_dbg_kms(&dev_priv->drm,
"Modeset required for cdclk change\n");
}
@@ -3488,7 +3536,7 @@ static const struct intel_cdclk_funcs mtl_cdclk_funcs = {
.get_cdclk = bxt_get_cdclk,
.set_cdclk = bxt_set_cdclk,
.modeset_calc_cdclk = bxt_modeset_calc_cdclk,
- .calc_voltage_level = tgl_calc_voltage_level,
+ .calc_voltage_level = rplu_calc_voltage_level,
};
static const struct intel_cdclk_funcs rplu_cdclk_funcs = {
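The squash/crawl hunks above compute the midpoint cdclk from the waveform's population count over cdclk_squash_len slots, and _bxt_set_cdclk() now recovers the unsquashed cd2x clock the same way instead of assuming vco/2. A minimal standalone sketch of that arithmetic, assuming a 16-slot waveform and using simplified DIV_ROUND_CLOSEST/hweight16 stand-ins rather than the kernel's own helpers:

/* Sketch: squash-waveform cdclk arithmetic (not driver code). */
#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* population count of the 16-bit squash waveform */
static int hweight16(unsigned int w)
{
	int bits = 0;

	for (; w; w >>= 1)
		bits += w & 1;
	return bits;
}

/* cdclk produced by a waveform: vco/div scaled by the waveform duty cycle */
static int squashed_cdclk(int vco, int div, unsigned int waveform, int squash_len)
{
	return DIV_ROUND_CLOSEST(hweight16(waveform) * vco, squash_len * div);
}

/* inverse used when programming the cd2x divider: recover the unsquashed clock */
static int unsquashed_cdclk(int cdclk, unsigned int waveform, int squash_len)
{
	return DIV_ROUND_CLOSEST(cdclk * squash_len, hweight16(waveform));
}

int main(void)
{
	int vco = 652800, div = 2, squash_len = 16;	/* example numbers only */
	unsigned int waveform = 0xaaaa;			/* 8 of 16 slots enabled */
	int cdclk = squashed_cdclk(vco, div, waveform, squash_len);

	printf("squashed cdclk = %d kHz\n", cdclk);	/* 163200 */
	assert(unsquashed_cdclk(cdclk, waveform, squash_len) == vco / div);
	return 0;
}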
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index 48fd7d39e0..71bc032bfe 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -51,6 +51,9 @@ struct intel_cdclk_state {
/* bitmask of active pipes */
u8 active_pipes;
+
+ /* update cdclk with pipes disabled */
+ bool disable_pipes;
};
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 1d26be54dd..c5092b7e87 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -785,14 +785,12 @@ static void chv_assign_csc(struct intel_crtc_state *crtc_state)
/* convert hw value with given bit_precision to lut property val */
static u32 intel_color_lut_pack(u32 val, int bit_precision)
{
- u32 max = 0xffff >> (16 - bit_precision);
-
- val = clamp_val(val, 0, max);
-
- if (bit_precision < 16)
- val <<= 16 - bit_precision;
-
- return val;
+ if (bit_precision > 16)
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(val, (1 << 16) - 1),
+ (1 << bit_precision) - 1);
+ else
+ return DIV_ROUND_CLOSEST(val * ((1 << 16) - 1),
+ (1 << bit_precision) - 1);
}
static u32 i9xx_lut_8(const struct drm_color_lut *color)
@@ -911,7 +909,7 @@ static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
static u16 i965_lut_11p6_max_pack(u32 val)
{
/* PIPEGCMAX is 11.6, clamp to 10.6 */
- return clamp_val(val, 0, 0xffff);
+ return min(val, 0xffffu);
}
static u32 ilk_lut_10(const struct drm_color_lut *color)
@@ -1528,14 +1526,27 @@ static int glk_degamma_lut_size(struct drm_i915_private *i915)
return 35;
}
-/*
- * change_lut_val_precision: helper function to upscale or downscale lut values.
- * Parameters 'to' and 'from' needs to be less than 32. This should be sufficient
- * as currently there are no lut values exceeding 32 bit.
- */
-static u32 change_lut_val_precision(u32 lut_val, int to, int from)
+static u32 glk_degamma_lut(const struct drm_color_lut *color)
+{
+ return color->green;
+}
+
+static void glk_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
+{
+ /* PRE_CSC_GAMC_DATA is 3.16, clamp to 0.16 */
+ entry->red = entry->green = entry->blue = min(val, 0xffffu);
+}
+
+static u32 mtl_degamma_lut(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->green, 24);
+}
+
+static void mtl_degamma_lut_pack(struct drm_color_lut *entry, u32 val)
{
- return mul_u32_u32(lut_val, (1 << to)) / (1 << from);
+ /* PRE_CSC_GAMC_DATA is 3.24, clamp to 0.16 */
+ entry->red = entry->green = entry->blue =
+ intel_color_lut_pack(min(val, 0xffffffu), 24);
}
static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
@@ -1572,20 +1583,16 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state,
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
- u32 lut_val;
-
- if (DISPLAY_VER(i915) >= 14)
- lut_val = change_lut_val_precision(lut[i].green, 24, 16);
- else
- lut_val = lut[i].green;
-
ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
- lut_val);
+ DISPLAY_VER(i915) >= 14 ?
+ mtl_degamma_lut(&lut[i]) : glk_degamma_lut(&lut[i]));
}
/* Clamp values > 1.0. */
while (i++ < glk_degamma_lut_size(i915))
- ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ ilk_lut_write(crtc_state, PRE_CSC_GAMC_DATA(pipe),
+ DISPLAY_VER(i915) >= 14 ?
+ 1 << 24 : 1 << 16);
ilk_lut_write(crtc_state, PRE_CSC_GAMC_INDEX(pipe), 0);
}
@@ -3572,17 +3579,10 @@ static struct drm_property_blob *glk_read_degamma_lut(struct intel_crtc *crtc)
for (i = 0; i < lut_size; i++) {
u32 val = intel_de_read_fw(dev_priv, PRE_CSC_GAMC_DATA(pipe));
- /*
- * For MTL and beyond, convert back the 24 bit lut values
- * read from HW to 16 bit values to maintain parity with
- * userspace values
- */
if (DISPLAY_VER(dev_priv) >= 14)
- val = change_lut_val_precision(val, 16, 24);
-
- lut[i].red = val;
- lut[i].green = val;
- lut[i].blue = val;
+ mtl_degamma_lut_pack(&lut[i], val);
+ else
+ glk_degamma_lut_pack(&lut[i], val);
}
intel_de_write_fw(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
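The intel_color_lut_pack() rework above replaces the old shift-based precision change with a rounded rescale against the full input and output ranges, so the MTL 3.24 degamma values map back onto the complete 16-bit UAPI range instead of leaving the top codes unreachable. A small standalone comparison of the two conversions, using a 10-bit-to-16-bit upscale as the example case:

/* Sketch: shift-based vs. rounded LUT precision conversion (not driver code). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* old approach: pure left shift, e.g. 10-bit hw value -> 16-bit property */
static uint32_t lut_pack_shift(uint32_t val, int bit_precision)
{
	return val << (16 - bit_precision);
}

/* new approach: rescale by (2^16 - 1) / (2^precision - 1) with rounding */
static uint32_t lut_pack_scale(uint32_t val, int bit_precision)
{
	uint64_t max_in = (1u << bit_precision) - 1;

	return DIV_ROUND_CLOSEST((uint64_t)val * 0xffff, max_in);
}

int main(void)
{
	uint32_t max10 = 0x3ff;		/* full-scale 10-bit hardware value */

	printf("shift: 0x%04" PRIx32 "\n", lut_pack_shift(max10, 10));	/* 0xffc0 */
	printf("scale: 0x%04" PRIx32 "\n", lut_pack_scale(max10, 10));	/* 0xffff */
	return 0;
}

The same rescale run in the other direction is what the mtl_degamma_lut_pack() readout above relies on to fold the clamped 24-bit hardware values back into the 16-bit property range.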
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 6f6b348b8a..abaacea5c2 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -846,7 +846,7 @@ intel_crt_detect(struct drm_connector *connector,
if (!intel_display_device_enabled(dev_priv))
return connector_status_disconnected;
- if (dev_priv->params.load_detect_test) {
+ if (dev_priv->display.params.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
intel_encoder->power_domain);
goto load_detect;
@@ -906,7 +906,7 @@ load_detect:
else if (DISPLAY_VER(dev_priv) < 4)
status = intel_crt_load_detect(crt,
to_intel_crtc(connector->state->crtc)->pipe);
- else if (dev_priv->params.load_detect_test)
+ else if (dev_priv->display.params.load_detect_test)
status = connector_status_disconnected;
else
status = connector_status_unknown;
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 1fd068e6e2..8a84a31c7b 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -553,8 +553,15 @@ void intel_pipe_update_start(struct intel_atomic_state *state,
intel_psr_lock(new_crtc_state);
- if (new_crtc_state->do_async_flip)
+ if (new_crtc_state->do_async_flip) {
+ spin_lock_irq(&crtc->base.dev->event_lock);
+ /* arm the event for the flip done irq handler */
+ crtc->flip_done_event = new_crtc_state->uapi.event;
+ spin_unlock_irq(&crtc->base.dev->event_lock);
+
+ new_crtc_state->uapi.event = NULL;
return;
+ }
if (intel_crtc_needs_vblank_work(new_crtc_state))
intel_crtc_vblank_work_init(new_crtc_state);
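For async flips the hunk above hands the page-flip event over to the flip-done interrupt handler: the event is armed under event_lock and uapi.event is cleared so the normal commit path no longer completes it. A minimal userspace sketch of that ownership hand-off, with a pthread mutex standing in for the spinlock and a thread standing in for the IRQ handler (all names here are illustrative, not driver API):

/* Sketch: arm-the-event-under-a-lock hand-off for async flips (not driver code). */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct flip_event {
	int seq;
};

static pthread_mutex_t event_lock = PTHREAD_MUTEX_INITIALIZER;
static struct flip_event *flip_done_event;	/* consumed by the "irq handler" */

/* commit path: arm the event and drop it from the uapi state */
static void arm_flip_done(struct flip_event **uapi_event)
{
	pthread_mutex_lock(&event_lock);
	flip_done_event = *uapi_event;
	pthread_mutex_unlock(&event_lock);

	*uapi_event = NULL;	/* the normal completion path must not see it */
}

/* "flip done irq handler": takes ownership and completes the event */
static void *flip_done_irq(void *arg)
{
	struct flip_event *e;

	(void)arg;

	pthread_mutex_lock(&event_lock);
	e = flip_done_event;
	flip_done_event = NULL;
	pthread_mutex_unlock(&event_lock);

	if (e) {
		printf("completing flip event %d from the flip-done handler\n", e->seq);
		free(e);
	}
	return NULL;
}

int main(void)
{
	struct flip_event *uapi_event = malloc(sizeof(*uapi_event));
	pthread_t irq;

	uapi_event->seq = 1;
	arm_flip_done(&uapi_event);

	pthread_create(&irq, NULL, flip_done_irq, NULL);
	pthread_join(irq, NULL);

	/* uapi_event is NULL here, so nothing else tries to complete it */
	return uapi_event ? 1 : 0;
}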
diff --git a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
index 66fe880af8..49fd100ec9 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc_state_dump.c
@@ -4,6 +4,7 @@
*/
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include "i915_drv.h"
#include "intel_crtc_state_dump.h"
@@ -261,6 +262,15 @@ void intel_crtc_state_dump(const struct intel_crtc_state *pipe_config,
drm_dbg_kms(&i915->drm, "fec: %s, enhanced framing: %s\n",
str_enabled_disabled(pipe_config->fec_enable),
str_enabled_disabled(pipe_config->enhanced_framing));
+
+ drm_dbg_kms(&i915->drm, "sdp split: %s\n",
+ str_enabled_disabled(pipe_config->sdp_split_enable));
+
+ drm_dbg_kms(&i915->drm, "psr: %s, psr2: %s, panel replay: %s, selective fetch: %s\n",
+ str_enabled_disabled(pipe_config->has_psr),
+ str_enabled_disabled(pipe_config->has_psr2),
+ str_enabled_disabled(pipe_config->has_panel_replay),
+ str_enabled_disabled(pipe_config->enable_psr2_sel_fetch));
}
drm_dbg_kms(&i915->drm, "framestart delay: %d, MSA timing delay: %d\n",
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index b342fad180..2dde7ac882 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -21,8 +21,11 @@
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
+#include "intel_psr_regs.h"
#include "skl_watermark.h"
+#include "gem/i915_gem_object.h"
+
/* Cursor formats */
static const u32 intel_cursor_formats[] = {
DRM_FORMAT_ARGB8888,
@@ -32,12 +35,10 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv =
to_i915(plane_state->uapi.plane->dev);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
u32 base;
if (DISPLAY_INFO(dev_priv)->cursor_needs_physical)
- base = sg_dma_address(obj->mm.pages->sgl);
+ base = plane_state->phys_dma_addr;
else
base = intel_plane_ggtt_offset(plane_state);
@@ -484,6 +485,35 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
return 0;
}
+static void i9xx_cursor_disable_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
+}
+
+static void i9xx_cursor_update_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ plane_state->ctl);
+ else
+ i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
+}
+
/* TODO: split into noarm+arm pair */
static void i9xx_cursor_update_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -531,10 +561,10 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane,
skl_write_cursor_wm(plane, crtc_state);
if (plane_state)
- intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state,
- plane_state);
+ i9xx_cursor_update_sel_fetch_arm(plane, crtc_state,
+ plane_state);
else
- intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
+ i9xx_cursor_disable_sel_fetch_arm(plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index ccf225afeb..6b25e19523 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -31,7 +31,7 @@
bool intel_is_c10phy(struct drm_i915_private *i915, enum phy phy)
{
- if (DISPLAY_VER_FULL(i915) == IP_VER(14, 0) && phy < PHY_C)
+ if ((IS_LUNARLAKE(i915) || IS_METEORLAKE(i915)) && phy < PHY_C)
return true;
return false;
@@ -206,6 +206,13 @@ static int __intel_cx0_read_once(struct drm_i915_private *i915, enum port port,
intel_clear_response_ready_flag(i915, port, lane);
+ /*
+ * FIXME: Workaround to let the HW settle
+ * down and the message bus end up
+ * in a known state
+ */
+ intel_cx0_bus_reset(i915, port, lane);
+
return REG_FIELD_GET(XELPDP_PORT_P2M_DATA_MASK, val);
}
@@ -285,6 +292,13 @@ static int __intel_cx0_write_once(struct drm_i915_private *i915, enum port port,
intel_clear_response_ready_flag(i915, port, lane);
+ /*
+ * FIXME: Workaround to let the HW settle
+ * down and the message bus end up
+ * in a known state
+ */
+ intel_cx0_bus_reset(i915, port, lane);
+
return 0;
}
@@ -401,9 +415,15 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(i915, encoder->port);
- u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
+ u8 owned_lane_mask;
intel_wakeref_t wakeref;
int n_entries, ln;
+ struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
+
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
+ return;
+
+ owned_lane_mask = intel_cx0_get_owned_lane_mask(i915, encoder);
wakeref = intel_cx0_phy_transaction_begin(encoder);
@@ -725,7 +745,6 @@ static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = {
/* C20 basic DP 1.4 tables */
static const struct intel_c20pll_state mtl_c20_dp_rbr = {
- .link_bit_rate = 162000,
.clock = 162000,
.tx = { 0xbe88, /* tx cfg0 */
0x5800, /* tx cfg1 */
@@ -751,7 +770,6 @@ static const struct intel_c20pll_state mtl_c20_dp_rbr = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
- .link_bit_rate = 270000,
.clock = 270000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
@@ -777,7 +795,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
- .link_bit_rate = 540000,
.clock = 540000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
@@ -803,7 +820,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
- .link_bit_rate = 810000,
.clock = 810000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
@@ -830,8 +846,7 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
/* C20 basic DP 2.0 tables */
static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
- .link_bit_rate = 1000000, /* 10 Gbps */
- .clock = 312500,
+ .clock = 1000000, /* 10 Gbps */
.tx = { 0xbe21, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -855,8 +870,7 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
};
static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
- .link_bit_rate = 1350000, /* 13.5 Gbps */
- .clock = 421875,
+ .clock = 1350000, /* 13.5 Gbps */
.tx = { 0xbea0, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -881,8 +895,7 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
};
static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
- .link_bit_rate = 2000000, /* 20 Gbps */
- .clock = 625000,
+ .clock = 2000000, /* 20 Gbps */
.tx = { 0xbe20, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1501,7 +1514,6 @@ static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = {
- .link_bit_rate = 25175,
.clock = 25175,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1527,7 +1539,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
- .link_bit_rate = 27000,
.clock = 27000,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1553,7 +1564,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
- .link_bit_rate = 74250,
.clock = 74250,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1579,7 +1589,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
- .link_bit_rate = 148500,
.clock = 148500,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1605,7 +1614,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
- .link_bit_rate = 594000,
.clock = 594000,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
@@ -1631,8 +1639,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
- .link_bit_rate = 3000000,
- .clock = 166670,
+ .clock = 3000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1657,8 +1664,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
- .link_bit_rate = 6000000,
- .clock = 333330,
+ .clock = 6000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1683,8 +1689,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
- .link_bit_rate = 8000000,
- .clock = 444440,
+ .clock = 8000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1709,8 +1714,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
- .link_bit_rate = 10000000,
- .clock = 555560,
+ .clock = 10000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1735,8 +1739,7 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
- .link_bit_rate = 12000000,
- .clock = 666670,
+ .clock = 12000000,
.tx = { 0xbe98, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1850,8 +1853,8 @@ static int intel_c10pll_calc_state(struct intel_crtc_state *crtc_state,
return -EINVAL;
}
-void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
- struct intel_c10pll_state *pll_state)
+static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_c10pll_state *pll_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 lane = INTEL_CX0_LANE0;
@@ -1985,7 +1988,6 @@ static int intel_c20_compute_hdmi_tmds_pll(u64 pixel_clock, struct intel_c20pll_
else
mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0;
- pll_state->link_bit_rate = pixel_clock;
pll_state->clock = pixel_clock;
pll_state->tx[0] = 0xbe88;
pll_state->tx[1] = 0x9800;
@@ -2022,7 +2024,7 @@ static int intel_c20_phy_check_hdmi_link_rate(int clock)
int i;
for (i = 0; tables[i]; i++) {
- if (clock == tables[i]->link_bit_rate)
+ if (clock == tables[i]->clock)
return MODE_OK;
}
@@ -2074,7 +2076,7 @@ static int intel_c20pll_calc_state(struct intel_crtc_state *crtc_state,
return -EINVAL;
for (i = 0; tables[i]; i++) {
- if (crtc_state->port_clock == tables[i]->link_bit_rate) {
+ if (crtc_state->port_clock == tables[i]->clock) {
crtc_state->cx0pll_state.c20 = *tables[i];
return 0;
}
@@ -2097,14 +2099,14 @@ int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state,
static bool intel_c20_use_mplla(u32 clock)
{
/* 10G and 20G rates use MPLLA */
- if (clock == 312500 || clock == 625000)
+ if (clock == 1000000 || clock == 2000000)
return true;
return false;
}
-void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
- struct intel_c20pll_state *pll_state)
+static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_c20pll_state *pll_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
bool cntx;
@@ -2200,11 +2202,11 @@ static u8 intel_c20_get_dp_rate(u32 clock)
return 6;
case 432000: /* 4.32 Gbps eDP */
return 7;
- case 312500: /* 10 Gbps DP2.0 */
+ case 1000000: /* 10 Gbps DP2.0 */
return 8;
- case 421875: /* 13.5 Gbps DP2.0 */
+ case 1350000: /* 13.5 Gbps DP2.0 */
return 9;
- case 625000: /* 20 Gbps DP2.0*/
+ case 2000000: /* 20 Gbps DP2.0 */
return 10;
case 648000: /* 6.48 Gbps eDP*/
return 11;
@@ -2222,13 +2224,13 @@ static u8 intel_c20_get_hdmi_rate(u32 clock)
return 0;
switch (clock) {
- case 166670: /* 3 Gbps */
- case 333330: /* 6 Gbps */
- case 666670: /* 12 Gbps */
+ case 300000: /* 3 Gbps */
+ case 600000: /* 6 Gbps */
+ case 1200000: /* 12 Gbps */
return 1;
- case 444440: /* 8 Gbps */
+ case 800000: /* 8 Gbps */
return 2;
- case 555560: /* 10 Gbps */
+ case 1000000: /* 10 Gbps */
return 3;
default:
MISSING_CASE(clock);
@@ -2239,7 +2241,7 @@ static u8 intel_c20_get_hdmi_rate(u32 clock)
static bool is_dp2(u32 clock)
{
/* DP2.0 clock rates */
- if (clock == 312500 || clock == 421875 || clock == 625000)
+ if (clock == 1000000 || clock == 1350000 || clock == 2000000)
return true;
return false;
@@ -2248,11 +2250,11 @@ static bool is_dp2(u32 clock)
static bool is_hdmi_frl(u32 clock)
{
switch (clock) {
- case 166670: /* 3 Gbps */
- case 333330: /* 6 Gbps */
- case 444440: /* 8 Gbps */
- case 555560: /* 10 Gbps */
- case 666670: /* 12 Gbps */
+ case 300000: /* 3 Gbps */
+ case 600000: /* 6 Gbps */
+ case 800000: /* 8 Gbps */
+ case 1000000: /* 10 Gbps */
+ case 1200000: /* 12 Gbps */
return true;
default:
return false;
@@ -2285,6 +2287,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
const struct intel_c20pll_state *pll_state = &crtc_state->cx0pll_state.c20;
bool dp = false;
int lane = crtc_state->lane_count > 2 ? INTEL_CX0_BOTH_LANES : INTEL_CX0_LANE0;
+ u32 clock = crtc_state->port_clock;
bool cntx;
int i;
@@ -2323,7 +2326,7 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
}
/* 3.3 mpllb or mplla configuration */
- if (intel_c20_use_mplla(pll_state->clock)) {
+ if (intel_c20_use_mplla(clock)) {
for (i = 0; i < ARRAY_SIZE(pll_state->mplla); i++) {
if (cntx)
intel_c20_sram_write(i915, encoder->port, INTEL_CX0_LANE0,
@@ -2350,23 +2353,23 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
/* 4. Program custom width to match the link protocol */
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_WIDTH,
PHY_C20_CUSTOM_WIDTH_MASK,
- PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(pll_state->clock, dp)),
+ PHY_C20_CUSTOM_WIDTH(intel_get_c20_custom_width(clock, dp)),
MB_WRITE_COMMITTED);
/* 5. For DP or 6. For HDMI */
if (dp) {
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(6) | PHY_C20_CUSTOM_SERDES_MASK,
- BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(pll_state->clock)),
+ BIT(6) | PHY_C20_CUSTOM_SERDES(intel_c20_get_dp_rate(clock)),
MB_WRITE_COMMITTED);
} else {
intel_cx0_rmw(i915, encoder->port, lane, PHY_C20_VDR_CUSTOM_SERDES_RATE,
BIT(7) | PHY_C20_CUSTOM_SERDES_MASK,
- is_hdmi_frl(pll_state->clock) ? BIT(7) : 0,
+ is_hdmi_frl(clock) ? BIT(7) : 0,
MB_WRITE_COMMITTED);
intel_cx0_write(i915, encoder->port, INTEL_CX0_BOTH_LANES, PHY_C20_VDR_HDMI_RATE,
- intel_c20_get_hdmi_rate(pll_state->clock),
+ intel_c20_get_hdmi_rate(clock),
MB_WRITE_COMMITTED);
}
@@ -2378,8 +2381,8 @@ static void intel_c20_pll_program(struct drm_i915_private *i915,
BIT(0), cntx ? 0 : 1, MB_WRITE_COMMITTED);
}
-int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c10pll_state *pll_state)
+static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c10pll_state *pll_state)
{
unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
@@ -2405,8 +2408,8 @@ int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
return tmpclk;
}
-int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c20pll_state *pll_state)
+static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_c20pll_state *pll_state)
{
unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
unsigned int multiplier, refclk = 38400;
@@ -3004,17 +3007,115 @@ intel_mtl_port_pll_type(struct intel_encoder *encoder,
return ICL_PORT_DPLL_DEFAULT;
}
-void intel_c10pll_state_verify(struct intel_atomic_state *state,
+static void intel_c10pll_state_verify(const struct intel_crtc_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder,
+ struct intel_c10pll_state *mpllb_hw_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_c10pll_state *mpllb_sw_state = &state->cx0pll_state.c10;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
+ u8 expected = mpllb_sw_state->pll[i];
+
+ I915_STATE_WARN(i915, mpllb_hw_state->pll[i] != expected,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name, i,
+ expected, mpllb_hw_state->pll[i]);
+ }
+
+ I915_STATE_WARN(i915, mpllb_hw_state->tx != mpllb_sw_state->tx,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ mpllb_sw_state->tx, mpllb_hw_state->tx);
+
+ I915_STATE_WARN(i915, mpllb_hw_state->cmn != mpllb_sw_state->cmn,
+ "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
+ crtc->base.base.id, crtc->base.name,
+ mpllb_sw_state->cmn, mpllb_hw_state->cmn);
+}
+
+void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_cx0pll_state *pll_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_is_c10phy(i915, phy))
+ intel_c10pll_readout_hw_state(encoder, &pll_state->c10);
+ else
+ intel_c20pll_readout_hw_state(encoder, &pll_state->c20);
+}
+
+int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_cx0pll_state *pll_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(i915, encoder->port);
+
+ if (intel_is_c10phy(i915, phy))
+ return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
+
+ return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
+}
+
+static void intel_c20pll_state_verify(const struct intel_crtc_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder,
+ struct intel_c20pll_state *mpll_hw_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ const struct intel_c20pll_state *mpll_sw_state = &state->cx0pll_state.c20;
+ bool sw_use_mpllb = mpll_sw_state->tx[0] & C20_PHY_USE_MPLLB;
+ bool hw_use_mpllb = mpll_hw_state->tx[0] & C20_PHY_USE_MPLLB;
+ int i;
+
+ I915_STATE_WARN(i915, sw_use_mpllb != hw_use_mpllb,
+ "[CRTC:%d:%s] mismatch in C20: Register MPLLB selection (expected %d, found %d)",
+ crtc->base.base.id, crtc->base.name,
+ sw_use_mpllb, hw_use_mpllb);
+
+ if (hw_use_mpllb) {
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mpllb); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->mpllb[i] != mpll_sw_state->mpllb[i],
+ "[CRTC:%d:%s] mismatch in C20MPLLB: Register[%d] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->mpllb[i], mpll_hw_state->mpllb[i]);
+ }
+ } else {
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->mplla); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->mplla[i] != mpll_sw_state->mplla[i],
+ "[CRTC:%d:%s] mismatch in C20MPLLA: Register[%d] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->mplla[i], mpll_hw_state->mplla[i]);
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->tx); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->tx[i] != mpll_sw_state->tx[i],
+ "[CRTC:%d:%s] mismatch in C20: Register TX[%i] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->tx[i], mpll_hw_state->tx[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mpll_sw_state->cmn); i++) {
+ I915_STATE_WARN(i915, mpll_hw_state->cmn[i] != mpll_sw_state->cmn[i],
+ "[CRTC:%d:%s] mismatch in C20: Register CMN[%i] (expected 0x%04x, found 0x%04x)",
+ crtc->base.base.id, crtc->base.name, i,
+ mpll_sw_state->cmn[i], mpll_hw_state->cmn[i]);
+ }
+}
+
+void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- struct intel_c10pll_state mpllb_hw_state = {};
- const struct intel_c10pll_state *mpllb_sw_state = &new_crtc_state->cx0pll_state.c10;
struct intel_encoder *encoder;
+ struct intel_cx0pll_state mpll_hw_state = {};
enum phy phy;
- int i;
if (DISPLAY_VER(i915) < 14)
return;
@@ -3030,27 +3131,13 @@ void intel_c10pll_state_verify(struct intel_atomic_state *state,
encoder = intel_get_crtc_new_encoder(state, new_crtc_state);
phy = intel_port_to_phy(i915, encoder->port);
- if (!intel_is_c10phy(i915, phy))
+ if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
- intel_c10pll_readout_hw_state(encoder, &mpllb_hw_state);
+ intel_cx0pll_readout_hw_state(encoder, &mpll_hw_state);
- for (i = 0; i < ARRAY_SIZE(mpllb_sw_state->pll); i++) {
- u8 expected = mpllb_sw_state->pll[i];
-
- I915_STATE_WARN(i915, mpllb_hw_state.pll[i] != expected,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register[%d] (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name, i,
- expected, mpllb_hw_state.pll[i]);
- }
-
- I915_STATE_WARN(i915, mpllb_hw_state.tx != mpllb_sw_state->tx,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register TX0 (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name,
- mpllb_sw_state->tx, mpllb_hw_state.tx);
-
- I915_STATE_WARN(i915, mpllb_hw_state.cmn != mpllb_sw_state->cmn,
- "[CRTC:%d:%s] mismatch in C10MPLLB: Register CMN0 (expected 0x%02x, found 0x%02x)",
- crtc->base.base.id, crtc->base.name,
- mpllb_sw_state->cmn, mpllb_hw_state.cmn);
+ if (intel_is_c10phy(i915, phy))
+ intel_c10pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c10);
+ else
+ intel_c20pll_state_verify(new_crtc_state, crtc, encoder, &mpll_hw_state.c20);
}
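The C20 table rework above keys every PLL state by the link bit rate in kHz (e.g. 1000000 for UHBR10) rather than an internal symbol clock, so rate classification can compare directly against crtc_state->port_clock. A standalone sketch of that classification using the rates from the hunks above; the function shapes are simplified stand-ins for the driver's helpers:

/* Sketch: C20 link-rate classification by port clock in kHz (not driver code). */
#include <stdbool.h>
#include <stdio.h>

static bool is_dp2(unsigned int clock)
{
	/* DP 2.0 UHBR10 / UHBR13.5 / UHBR20 */
	return clock == 1000000 || clock == 1350000 || clock == 2000000;
}

static bool is_hdmi_frl(unsigned int clock)
{
	switch (clock) {
	case 300000:	/* 3 Gbps */
	case 600000:	/* 6 Gbps */
	case 800000:	/* 8 Gbps */
	case 1000000:	/* 10 Gbps */
	case 1200000:	/* 12 Gbps */
		return true;
	default:
		return false;
	}
}

static bool use_mplla(unsigned int clock)
{
	/* the 10G and 20G DP rates use MPLLA, everything else MPLLB */
	return clock == 1000000 || clock == 2000000;
}

int main(void)
{
	const unsigned int rates[] = { 540000, 810000, 1000000, 1200000, 1350000 };

	for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++)
		printf("%7u kHz: dp2=%d hdmi_frl=%d mplla=%d\n", rates[i],
		       is_dp2(rates[i]), is_hdmi_frl(rates[i]), use_mplla(rates[i]));
	return 0;
}

As in the hunks above, 1000000 kHz shows up in both the DP 2.0 and HDMI FRL sets; the surrounding DP vs. HDMI context is what disambiguates it.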
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index 0e0a38dac8..c668267725 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -16,6 +16,7 @@ struct drm_i915_private;
struct intel_atomic_state;
struct intel_c10pll_state;
struct intel_c20pll_state;
+struct intel_cx0pll_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
@@ -28,20 +29,19 @@ void intel_mtl_pll_disable(struct intel_encoder *encoder);
enum icl_port_dpll_id
intel_mtl_port_pll_type(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-void intel_c10pll_readout_hw_state(struct intel_encoder *encoder, struct intel_c10pll_state *pll_state);
+
int intel_cx0pll_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder);
+void intel_cx0pll_readout_hw_state(struct intel_encoder *encoder,
+ struct intel_cx0pll_state *pll_state);
+int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
+ const struct intel_cx0pll_state *pll_state);
+
void intel_c10pll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_c10pll_state *hw_state);
-int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c10pll_state *pll_state);
-void intel_c10pll_state_verify(struct intel_atomic_state *state,
+void intel_cx0pll_state_verify(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
- struct intel_c20pll_state *pll_state);
void intel_c20pll_dump_hw_state(struct drm_i915_private *i915,
const struct intel_c20pll_state *hw_state);
-int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c20pll_state *pll_state);
void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
int intel_cx0_phy_check_hdmi_link_rate(struct intel_hdmi *hdmi, int clock);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 9151d5add9..31aa5d54fd 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -25,6 +25,7 @@
*
*/
+#include <linux/iopoll.h>
#include <linux/string_helpers.h>
#include <drm/display/drm_scdc_helper.h>
@@ -2210,16 +2211,87 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel
}
static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_crtc_state *crtc_state,
+ bool enable)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (!crtc_state->fec_enable)
return;
- if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION, DP_FEC_READY) <= 0)
- drm_dbg_kms(&i915->drm,
- "Failed to set FEC_READY in the sink\n");
+ if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION,
+ enable ? DP_FEC_READY : 0) <= 0)
+ drm_dbg_kms(&i915->drm, "Failed to set FEC_READY to %s in the sink\n",
+ enable ? "enabled" : "disabled");
+
+ if (enable &&
+ drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS,
+ DP_FEC_DECODE_EN_DETECTED | DP_FEC_DECODE_DIS_DETECTED) <= 0)
+ drm_dbg_kms(&i915->drm, "Failed to clear FEC detected flags\n");
+}
+
+static int read_fec_detected_status(struct drm_dp_aux *aux)
+{
+ int ret;
+ u8 status;
+
+ ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status);
+ if (ret < 0)
+ return ret;
+
+ return status;
+}
+
+static void wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled)
+{
+ struct drm_i915_private *i915 = to_i915(aux->drm_dev);
+ int mask = enabled ? DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED;
+ int status;
+ int err;
+
+ err = readx_poll_timeout(read_fec_detected_status, aux, status,
+ status & mask || status < 0,
+ 10000, 200000);
+
+ if (!err && status >= 0)
+ return;
+
+ if (err == -ETIMEDOUT)
+ drm_dbg_kms(&i915->drm, "Timeout waiting for FEC %s to get detected\n",
+ str_enabled_disabled(enabled));
+ else
+ drm_dbg_kms(&i915->drm, "FEC detected status read error: %d\n", status);
+}
+
+void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool enabled)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int ret;
+
+ if (!crtc_state->fec_enable)
+ return;
+
+ if (enabled)
+ ret = intel_de_wait_for_set(i915, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+ else
+ ret = intel_de_wait_for_clear(i915, dp_tp_status_reg(encoder, crtc_state),
+ DP_TP_STATUS_FEC_ENABLE_LIVE, 1);
+
+ if (ret)
+ drm_err(&i915->drm,
+ "Timeout waiting for FEC live state to get %s\n",
+ str_enabled_disabled(enabled));
+
+ /*
+ * At least the Synaptics MST hub doesn't set the detected flag for
+ * FEC decoding disabling, so skip waiting for that.
+ */
+ if (enabled)
+ wait_for_fec_detected(&intel_dp->aux, enabled);
}
static void intel_ddi_enable_fec(struct intel_encoder *encoder,
@@ -2234,8 +2306,8 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
0, DP_TP_CTL_FEC_ENABLE);
}
-static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+static void intel_ddi_disable_fec(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -2466,13 +2538,17 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
- intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ if (!is_mst)
+ intel_dp_sink_enable_decompression(state,
+ to_intel_connector(conn_state->connector),
+ crtc_state);
+
/*
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
* in the FEC_CONFIGURATION register to 1 before initiating link
* training
*/
- intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_check_frl_training(intel_dp);
intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
@@ -2505,7 +2581,8 @@ static void mtl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 6.o Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
- intel_dsc_dp_pps_write(encoder, crtc_state);
+ if (!is_mst)
+ intel_dsc_dp_pps_write(encoder, crtc_state);
}
static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2616,13 +2693,16 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
- intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
+ if (!is_mst)
+ intel_dp_sink_enable_decompression(state,
+ to_intel_connector(conn_state->connector),
+ crtc_state);
/*
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
* in the FEC_CONFIGURATION register to 1 before initiating link
* training
*/
- intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_check_frl_training(intel_dp);
intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
@@ -2643,7 +2723,8 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
/* 7.l Configure and enable FEC if needed */
intel_ddi_enable_fec(encoder, crtc_state);
- intel_dsc_dp_pps_write(encoder, crtc_state);
+ if (!is_mst)
+ intel_dsc_dp_pps_write(encoder, crtc_state);
}
static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2695,9 +2776,11 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
- intel_dp_sink_set_decompression_state(intel_dp, crtc_state,
- true);
- intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ if (!is_mst)
+ intel_dp_sink_enable_decompression(state,
+ to_intel_connector(conn_state->connector),
+ crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, crtc_state, true);
intel_dp_start_link_train(intel_dp, crtc_state);
if ((port != PORT_A || DISPLAY_VER(dev_priv) >= 9) &&
!is_trans_port_sync_mode(crtc_state))
@@ -2705,10 +2788,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_fec(encoder, crtc_state);
- if (!is_mst)
+ if (!is_mst) {
intel_ddi_enable_transcoder_clock(encoder, crtc_state);
-
- intel_dsc_dp_pps_write(encoder, crtc_state);
+ intel_dsc_dp_pps_write(encoder, crtc_state);
+ }
}
static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
@@ -2717,10 +2800,15 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state,
const struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (HAS_DP20(dev_priv))
+ if (HAS_DP20(dev_priv)) {
intel_dp_128b132b_sdp_crc16(enc_to_intel_dp(encoder),
crtc_state);
+ if (crtc_state->has_panel_replay)
+ drm_dp_dpcd_writeb(&intel_dp->aux, PANEL_REPLAY_CONFIG,
+ DP_PANEL_REPLAY_ENABLE);
+ }
if (DISPLAY_VER(dev_priv) >= 14)
mtl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state);
@@ -2866,8 +2954,7 @@ static void disable_ddi_buf(struct intel_encoder *encoder,
intel_de_rmw(dev_priv, dp_tp_ctl_reg(encoder, crtc_state),
DP_TP_CTL_ENABLE, 0);
- /* Disable FEC in DP Sink */
- intel_ddi_disable_fec_state(encoder, crtc_state);
+ intel_ddi_disable_fec(encoder, crtc_state);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
@@ -2882,10 +2969,12 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
mtl_disable_ddi_buf(encoder, crtc_state);
/* 3.f Disable DP_TP_CTL FEC Enable if it is needed */
- intel_ddi_disable_fec_state(encoder, crtc_state);
+ intel_ddi_disable_fec(encoder, crtc_state);
} else {
disable_ddi_buf(encoder, crtc_state);
}
+
+ intel_ddi_wait_for_fec_status(encoder, crtc_state, false);
}
static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
@@ -2925,6 +3014,8 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_disable_ddi_buf(encoder, old_crtc_state);
+ intel_dp_sink_set_fec_ready(intel_dp, old_crtc_state, false);
+
/*
* From TGL spec: "If single stream or multi-stream master transcoder:
* Configure Transcoder Clock select to direct no clock to the
@@ -3110,11 +3201,18 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
if (!dig_port->lspcon.active || intel_dp_has_hdmi_sink(&dig_port->dp))
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
- intel_audio_codec_enable(encoder, crtc_state, conn_state);
-
trans_port_sync_stop_link_train(state, encoder, crtc_state);
}
+/* FIXME bad home for this function */
+i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder)
+{
+ return DISPLAY_VER(i915) >= 14 ?
+ MTL_CHICKEN_TRANS(cpu_transcoder) :
+ CHICKEN_TRANS(cpu_transcoder);
+}
+
static i915_reg_t
gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
enum port port)
@@ -3233,8 +3331,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
intel_de_write(dev_priv, DDI_BUF_CTL(port), buf_ctl);
intel_wait_ddi_buf_active(dev_priv, port);
-
- intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
static void intel_enable_ddi(struct intel_atomic_state *state,
@@ -3252,6 +3348,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_enable_transcoder(crtc_state);
+ intel_ddi_wait_for_fec_status(encoder, crtc_state, true);
+
intel_crtc_vblank_on(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
@@ -3259,10 +3357,8 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
else
intel_enable_ddi_dp(state, encoder, crtc_state, conn_state);
- /* Enable hdcp if it's desired */
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+ intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+
}
static void intel_disable_ddi_dp(struct intel_atomic_state *state,
@@ -3271,16 +3367,16 @@ static void intel_disable_ddi_dp(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_connector *connector =
+ to_intel_connector(old_conn_state->connector);
intel_dp->link_trained = false;
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
intel_psr_disable(intel_dp, old_crtc_state);
intel_edp_backlight_off(old_conn_state);
/* Disable the decompression in DP Sink */
- intel_dp_sink_set_decompression_state(intel_dp, old_crtc_state,
- false);
+ intel_dp_sink_disable_decompression(state,
+ connector, old_crtc_state);
/* Disable Ignore_MSA bit in DP Sink */
intel_dp_sink_set_msa_timing_par_ignore_state(intel_dp, old_crtc_state,
false);
@@ -3294,8 +3390,6 @@ static void intel_disable_ddi_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct drm_connector *connector = old_conn_state->connector;
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
-
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
false, false))
drm_dbg_kms(&i915->drm,
@@ -3578,16 +3672,42 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state)
+static int tgl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
{
- if (DISPLAY_VER(dev_priv) >= 12 && crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 2;
- else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
- crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 3;
- else if (DISPLAY_VER(dev_priv) >= 11 && crtc_state->port_clock > 594000)
- crtc_state->min_voltage_level = 1;
+ if (crtc_state->port_clock > 594000)
+ return 2;
+ else
+ return 0;
+}
+
+static int jsl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->port_clock > 594000)
+ return 3;
+ else
+ return 0;
+}
+
+static int icl_ddi_min_voltage_level(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->port_clock > 594000)
+ return 1;
+ else
+ return 0;
+}
+
+void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ if (DISPLAY_VER(dev_priv) >= 14)
+ crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ crtc_state->min_voltage_level = tgl_ddi_min_voltage_level(crtc_state);
+ else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv))
+ crtc_state->min_voltage_level = jsl_ddi_min_voltage_level(crtc_state);
+ else if (DISPLAY_VER(dev_priv) >= 11)
+ crtc_state->min_voltage_level = icl_ddi_min_voltage_level(crtc_state);
}
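The voltage-level hunk above splits the old if/else ladder into per-platform helpers and dispatches on display version, with display version 14 and later reusing the ICL rule. A standalone sketch of that dispatch, with the platform checks reduced to an assumed enum rather than the driver's IS_*()/DISPLAY_VER() macros:

/* Sketch: per-platform min voltage level dispatch (not driver code). */
#include <stdio.h>

enum ddi_platform { PLAT_ICL, PLAT_JSL_EHL, PLAT_TGL, PLAT_MTL };

static int icl_min_voltage_level(int port_clock) { return port_clock > 594000 ? 1 : 0; }
static int jsl_min_voltage_level(int port_clock) { return port_clock > 594000 ? 3 : 0; }
static int tgl_min_voltage_level(int port_clock) { return port_clock > 594000 ? 2 : 0; }

static int min_voltage_level(enum ddi_platform plat, int port_clock)
{
	switch (plat) {
	case PLAT_MTL:		/* display ver >= 14: back to the ICL rule */
		return icl_min_voltage_level(port_clock);
	case PLAT_TGL:		/* display ver >= 12 */
		return tgl_min_voltage_level(port_clock);
	case PLAT_JSL_EHL:
		return jsl_min_voltage_level(port_clock);
	case PLAT_ICL:		/* display ver >= 11 */
		return icl_min_voltage_level(port_clock);
	}
	return 0;
}

int main(void)
{
	printf("TGL @ 810000: %d\n", min_voltage_level(PLAT_TGL, 810000));	/* 2 */
	printf("JSL @ 810000: %d\n", min_voltage_level(PLAT_JSL_EHL, 810000));	/* 3 */
	printf("MTL @ 540000: %d\n", min_voltage_level(PLAT_MTL, 540000));	/* 0 */
	return 0;
}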
static enum transcoder bdw_transcoder_master_readout(struct drm_i915_private *dev_priv,
@@ -3801,7 +3921,7 @@ static void intel_ddi_get_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ intel_ddi_compute_min_voltage_level(pipe_config);
intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
@@ -3854,18 +3974,13 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
static void mtl_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
- } else if (intel_is_c10phy(i915, phy)) {
- intel_c10pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c10);
- crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
} else {
- intel_c20pll_readout_hw_state(encoder, &crtc_state->cx0pll_state.c20);
- crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
+ intel_cx0pll_readout_hw_state(encoder, &crtc_state->cx0pll_state);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
}
intel_ddi_get_config(encoder, crtc_state);
@@ -4086,7 +4201,7 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_ddi_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ intel_ddi_compute_min_voltage_level(pipe_config);
return 0;
}
@@ -4114,7 +4229,12 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1,
static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
const struct intel_crtc_state *crtc_state2)
{
+ /*
+ * FIXME the modeset sequence is currently wrong and
+ * can't deal with bigjoiner + port sync at the same time.
+ */
return crtc_state1->hw.active && crtc_state2->hw.active &&
+ !crtc_state1->bigjoiner_pipes && !crtc_state2->bigjoiner_pipes &&
crtc_state1->output_types == crtc_state2->output_types &&
crtc_state1->output_format == crtc_state2->output_format &&
crtc_state1->lane_count == crtc_state2->lane_count &&
@@ -4844,6 +4964,8 @@ void intel_ddi_init(struct drm_i915_private *dev_priv,
encoder->post_pll_disable = intel_ddi_post_pll_disable;
encoder->post_disable = intel_ddi_post_disable;
encoder->update_pipe = intel_ddi_update_pipe;
+ encoder->audio_enable = intel_audio_codec_enable;
+ encoder->audio_disable = intel_audio_codec_disable;
encoder->get_hw_state = intel_ddi_get_hw_state;
encoder->sync_state = intel_ddi_sync_state;
encoder->initial_fastset_check = intel_ddi_initial_fastset_check;
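intel_ddi_wait_for_fec_status() above first waits for DP_TP_STATUS_FEC_ENABLE_LIVE and then polls DP_FEC_STATUS through readx_poll_timeout() until the enable-detected flag shows up or the DPCD read fails. A standalone sketch of that poll-until-flag-or-error pattern; the poll helper and the fake status source below are simplified assumptions, not the kernel macro or a real DPCD read:

/* Sketch: poll a FEC status source until detected or a read error (not driver code). */
#include <stdio.h>

#define FEC_DECODE_EN_DETECTED		(1 << 0)
#define FEC_DECODE_DIS_DETECTED		(1 << 1)

/* stand-in for the DPCD read: pretend FEC enable gets detected on poll 3 */
static int read_fec_detected_status(int *polls)
{
	return ++(*polls) >= 3 ? FEC_DECODE_EN_DETECTED : 0;
}

/*
 * Simplified readx_poll_timeout(): re-read until the mask bit is set or the
 * read returns an error; the kernel macro additionally sleeps 10ms between
 * reads and gives up after 200ms.
 */
static int poll_fec_detected(int *polls, int mask, int *status, int max_polls)
{
	for (int i = 0; i < max_polls; i++) {
		*status = read_fec_detected_status(polls);
		if (*status < 0 || (*status & mask))
			return 0;
	}
	return -1;	/* -ETIMEDOUT in the kernel */
}

int main(void)
{
	int polls = 0, status = 0;
	int err = poll_fec_detected(&polls, FEC_DECODE_EN_DETECTED, &status, 20);

	if (err)
		printf("timeout waiting for FEC enable to get detected\n");
	else if (status < 0)
		printf("FEC detected status read error: %d\n", status);
	else
		printf("FEC enable detected after %d polls\n", polls);
	return 0;
}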
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 4999c0ee22..434de71968 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -27,6 +27,8 @@ i915_reg_t dp_tp_ctl_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
i915_reg_t dp_tp_status_reg(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+i915_reg_t hsw_chicken_trans_reg(struct drm_i915_private *i915,
+ enum transcoder cpu_transcoder);
void intel_ddi_fdi_post_disable(struct intel_atomic_state *state,
struct intel_encoder *intel_encoder,
const struct intel_crtc_state *old_crtc_state,
@@ -60,13 +62,15 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
void intel_ddi_enable_transcoder_clock(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_transcoder_clock(const struct intel_crtc_state *crtc_state);
+void intel_ddi_wait_for_fec_status(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ bool enabled);
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
-void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
- struct intel_crtc_state *crtc_state);
+void intel_ddi_compute_min_voltage_level(struct intel_crtc_state *crtc_state);
int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
enum transcoder cpu_transcoder,
bool enable, u32 hdcp_mask);
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index df582ff81b..b10aad15a6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -48,6 +48,7 @@
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "hsw_ips.h"
+#include "i915_config.h"
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -72,10 +73,10 @@
#include "intel_dp.h"
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
-#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
+#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
@@ -193,12 +194,9 @@ static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
- if (enable)
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
- 0, DUPS1_GATING_DIS | DUPS2_GATING_DIS);
- else
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
- DUPS1_GATING_DIS | DUPS2_GATING_DIS, 0);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ DUPS1_GATING_DIS | DUPS2_GATING_DIS,
+ enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0);
}
/* Wa_2006604312:icl,ehl */
@@ -206,10 +204,9 @@ static void
icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
- if (enable)
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DPFR_GATING_DIS);
- else
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), DPFR_GATING_DIS, 0);
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ DPFR_GATING_DIS,
+ enable ? DPFR_GATING_DIS : 0);
}
/* Wa_1604331009:icl,jsl,ehl */
@@ -217,7 +214,8 @@ static void
icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
- intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe),
+ CURSOR_GATING_DIS,
enable ? CURSOR_GATING_DIS : 0);
}
@@ -397,7 +395,6 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
u32 val;
drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
@@ -430,16 +427,16 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
- reg = TRANSCONF(cpu_transcoder);
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
if (val & TRANSCONF_ENABLE) {
/* we keep both pipes enabled on 830 */
drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
- intel_de_write(dev_priv, reg, val | TRANSCONF_ENABLE);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
+ val | TRANSCONF_ENABLE);
+ intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
/*
* Until the pipe starts PIPEDSL reads will return a stale value,
@@ -458,7 +455,6 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
- i915_reg_t reg;
u32 val;
drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
@@ -469,8 +465,7 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
*/
assert_planes_disabled(crtc);
- reg = TRANSCONF(cpu_transcoder);
- val = intel_de_read(dev_priv, reg);
+ val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
if ((val & TRANSCONF_ENABLE) == 0)
return;
@@ -485,14 +480,12 @@ void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~TRANSCONF_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 14)
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder),
- FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
- else if (DISPLAY_VER(dev_priv) >= 12)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder),
+ intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
+
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
FECSTALL_DIS_DPTSTREAM_DPTTG, 0);
- intel_de_write(dev_priv, reg, val);
if ((val & TRANSCONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
@@ -896,6 +889,48 @@ static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
(DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
}
+static void intel_encoders_audio_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct drm_connector_state *conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, conn, conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+
+ if (conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->audio_enable)
+ encoder->audio_enable(encoder, crtc_state, conn_state);
+ }
+}
+
+static void intel_encoders_audio_disable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ const struct drm_connector_state *old_conn_state;
+ struct drm_connector *conn;
+ int i;
+
+ for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(old_conn_state->best_encoder);
+
+ if (old_conn_state->crtc != &crtc->base)
+ continue;
+
+ if (encoder->audio_disable)
+ encoder->audio_disable(encoder, old_crtc_state, old_conn_state);
+ }
+}
+
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
(new_crtc_state)->feature)
@@ -955,6 +990,28 @@ static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
vrr_params_changed(old_crtc_state, new_crtc_state)));
}
+static bool audio_enabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_enabling(has_audio, old_crtc_state, new_crtc_state) ||
+ (new_crtc_state->has_audio &&
+ memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
+}
+
+static bool audio_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!old_crtc_state->hw.active)
+ return false;
+
+ return is_disabling(has_audio, old_crtc_state, new_crtc_state) ||
+ (old_crtc_state->has_audio &&
+ memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0);
+}
+
#undef is_disabling
#undef is_enabling
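The audio_enabling()/audio_disabling() predicates above fire the new encoder audio hooks not only when has_audio toggles but also when the ELD changes on a pipe that stays active, which turns an ELD update into a codec disable/re-enable pair. A standalone sketch of those predicates, with the crtc state reduced to an assumed local struct and the full-modeset term of is_enabling()/is_disabling() omitted:

/* Sketch: ELD-aware audio enable/disable predicates (not driver code). */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_ELD_BYTES 128

struct crtc_audio_state {
	bool active;
	bool has_audio;
	unsigned char eld[MAX_ELD_BYTES];
};

static bool audio_enabling(const struct crtc_audio_state *old,
			   const struct crtc_audio_state *new)
{
	if (!new->active)
		return false;

	/* enable when audio turns on, or when the ELD changed while it is on */
	return (!old->has_audio && new->has_audio) ||
	       (new->has_audio &&
		memcmp(old->eld, new->eld, MAX_ELD_BYTES) != 0);
}

static bool audio_disabling(const struct crtc_audio_state *old,
			    const struct crtc_audio_state *new)
{
	if (!old->active)
		return false;

	/* disable when audio turns off, or before re-enabling with a new ELD */
	return (old->has_audio && !new->has_audio) ||
	       (old->has_audio &&
		memcmp(old->eld, new->eld, MAX_ELD_BYTES) != 0);
}

int main(void)
{
	struct crtc_audio_state old = { .active = true, .has_audio = true, .eld = "monitor A" };
	struct crtc_audio_state new = { .active = true, .has_audio = true, .eld = "monitor B" };

	/* ELD change on a live pipe: codec gets disabled, then re-enabled */
	printf("disabling=%d enabling=%d\n",
	       audio_disabling(&old, &new), audio_enabling(&old, &new));
	return 0;
}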
@@ -995,6 +1052,9 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (intel_crtc_needs_color_update(new_crtc_state))
intel_color_post_update(new_crtc_state);
+
+ if (audio_enabling(old_crtc_state, new_crtc_state))
+ intel_encoders_audio_enable(state, crtc);
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -1078,6 +1138,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_crtc_update_active_timings(old_crtc_state, false);
}
+ if (audio_disabling(old_crtc_state, new_crtc_state))
+ intel_encoders_audio_disable(state, crtc);
+
intel_drrs_deactivate(old_crtc_state);
intel_psr_pre_plane_update(state, crtc);
@@ -1513,12 +1576,9 @@ static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum transcoder transcoder = crtc_state->cpu_transcoder;
- i915_reg_t reg = DISPLAY_VER(dev_priv) >= 14 ? MTL_CHICKEN_TRANS(transcoder) :
- CHICKEN_TRANS(transcoder);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- intel_de_rmw(dev_priv, reg,
+ intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder),
HSW_FRAME_START_DELAY_MASK,
HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1));
}
@@ -1796,31 +1856,31 @@ bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy)
{
+ /*
+ * DG2's "TC1", although a TC-capable output, doesn't share the same flow
+ * as other platforms on the display engine side and instead relies on the
+ * SNPS PHY, which is programmed separately
+ */
if (IS_DG2(dev_priv))
- /* DG2's "TC1" output uses a SNPS PHY */
return false;
- else if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER_FULL(dev_priv) == IP_VER(14, 0))
+
+ if (DISPLAY_VER(dev_priv) >= 13)
return phy >= PHY_F && phy <= PHY_I;
else if (IS_TIGERLAKE(dev_priv))
return phy >= PHY_D && phy <= PHY_I;
else if (IS_ICELAKE(dev_priv))
return phy >= PHY_C && phy <= PHY_F;
- else
- return false;
+
+ return false;
}
bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy)
{
- if (phy == PHY_NONE)
- return false;
- else if (IS_DG2(dev_priv))
- /*
- * All four "combo" ports and the TC1 port (PHY E) use
- * Synopsis PHYs.
- */
- return phy <= PHY_E;
-
- return false;
+ /*
+ * For DG2, and for DG2 only, all four "combo" ports and the TC1 port
+ * (PHY E) use Synopsys PHYs. See intel_phy_is_tc().
+ */
+ return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E;
}
enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port)
@@ -2409,15 +2469,15 @@ static void compute_m_n(u32 *ret_m, u32 *ret_n,
}
void
-intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
+intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool fec_enable)
+ int bw_overhead,
+ struct intel_link_m_n *m_n)
{
- u32 data_clock = bits_per_pixel * pixel_clock;
-
- if (fec_enable)
- data_clock = intel_dp_mode_to_fec_clock(data_clock);
+ u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
+ u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
+ bw_overhead);
+ u32 data_n = intel_dp_max_data_rate(link_clock, nlanes);
/*
* Windows/BIOS uses fixed M/N values always. Follow suit.
@@ -2428,11 +2488,11 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
*/
m_n->tu = 64;
compute_m_n(&m_n->data_m, &m_n->data_n,
- data_clock, link_clock * nlanes * 8,
+ data_m, data_n,
0x8000000);
compute_m_n(&m_n->link_m, &m_n->link_n,
- pixel_clock, link_clock,
+ pixel_clock, link_symbol_clock,
0x80000);
}
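
Reading the intel_link_compute_m_n() rework above: the data M/N ratio handed to compute_m_n() now comes from the effective stream data rate (pixel clock plus the .4 fixed-point bpp, scaled by bw_overhead) over the maximum link data rate, and the link M/N from the pixel clock over the link symbol clock, instead of the raw bpp*pixel_clock and link_clock*nlanes*8 products. A sketch of the resulting ratios, restated from the new code — only the ratios matter here, since compute_m_n() merely reduces them to fit:

   data_m / data_n = intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead)
                     / intel_dp_max_data_rate(link_clock, nlanes)
   link_m / link_n = pixel_clock / intel_dp_link_symbol_clock(link_clock)
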
@@ -2567,7 +2627,7 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_sta
crtc_vblank_start = 1;
}
- if (DISPLAY_VER(dev_priv) > 3)
+ if (DISPLAY_VER(dev_priv) >= 4)
intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
vsyncshift);
@@ -2850,67 +2910,6 @@ static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
-static void vlv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- struct dpll clock;
- u32 mdiv;
- int refclk = 100000;
-
- /* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- vlv_dpio_get(dev_priv);
- mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
- vlv_dpio_put(dev_priv);
-
- clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
- clock.m2 = mdiv & DPIO_M2DIV_MASK;
- clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
- clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
- clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
-
- pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
-}
-
-static void chv_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- struct dpll clock;
- u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
- int refclk = 100000;
-
- /* In case of DSI, DPLL will not be used */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- vlv_dpio_get(dev_priv);
- cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
- pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
- pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
- pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
- pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
- vlv_dpio_put(dev_priv);
-
- clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
- clock.m2 = (pll_dw0 & 0xff) << 22;
- if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
- clock.m2 |= pll_dw2 & 0x3fffff;
- clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
- clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
- clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
-
- pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
-}
-
static enum intel_output_format
bdw_get_pipe_misc_output_format(struct intel_crtc *crtc)
{
@@ -3168,7 +3167,7 @@ static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state)
break;
case 36:
/* Port output 12BPC defined for ADLP+ */
- if (DISPLAY_VER(dev_priv) > 12)
+ if (DISPLAY_VER(dev_priv) >= 13)
val |= PIPE_MISC_BPC_12_ADLP;
break;
default:
@@ -3225,7 +3224,7 @@ int bdw_get_pipe_misc_bpp(struct intel_crtc *crtc)
* MIPI DSI HW readout.
*/
case PIPE_MISC_BPC_12_ADLP:
- if (DISPLAY_VER(dev_priv) > 12)
+ if (DISPLAY_VER(dev_priv) >= 13)
return 36;
fallthrough;
default:
@@ -3802,9 +3801,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
}
if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
- tmp = intel_de_read(dev_priv, DISPLAY_VER(dev_priv) >= 14 ?
- MTL_CHICKEN_TRANS(pipe_config->cpu_transcoder) :
- CHICKEN_TRANS(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, hsw_chicken_trans_reg(dev_priv, pipe_config->cpu_transcoder));
pipe_config->framestart_delay = REG_FIELD_GET(HSW_FRAME_START_DELAY_MASK, tmp) + 1;
} else {
@@ -3833,133 +3830,27 @@ bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
return true;
}
-static int i9xx_pll_refclk(struct drm_device *dev,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
-
- if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
- return dev_priv->display.vbt.lvds_ssc_freq;
- else if (HAS_PCH_SPLIT(dev_priv))
- return 120000;
- else if (DISPLAY_VER(dev_priv) != 2)
- return 96000;
- else
- return 48000;
-}
-
-/* Returns the clock of the currently programmed mode of the given pipe. */
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- u32 dpll = pipe_config->dpll_hw_state.dpll;
- u32 fp;
- struct dpll clock;
- int port_clock;
- int refclk = i9xx_pll_refclk(dev, pipe_config);
-
- if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
- fp = pipe_config->dpll_hw_state.fp0;
- else
- fp = pipe_config->dpll_hw_state.fp1;
-
- clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
- if (IS_PINEVIEW(dev_priv)) {
- clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
- clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
- } else {
- clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
- clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
- }
-
- if (DISPLAY_VER(dev_priv) != 2) {
- if (IS_PINEVIEW(dev_priv))
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
- else
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT);
-
- switch (dpll & DPLL_MODE_MASK) {
- case DPLLB_MODE_DAC_SERIAL:
- clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
- 5 : 10;
- break;
- case DPLLB_MODE_LVDS:
- clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
- 7 : 14;
- break;
- default:
- drm_dbg_kms(&dev_priv->drm,
- "Unknown DPLL mode %08x in programmed "
- "mode\n", (int)(dpll & DPLL_MODE_MASK));
- return;
- }
-
- if (IS_PINEVIEW(dev_priv))
- port_clock = pnv_calc_dpll_params(refclk, &clock);
- else
- port_clock = i9xx_calc_dpll_params(refclk, &clock);
- } else {
- enum pipe lvds_pipe;
-
- if (IS_I85X(dev_priv) &&
- intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
- lvds_pipe == crtc->pipe) {
- u32 lvds = intel_de_read(dev_priv, LVDS);
-
- clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT);
-
- if (lvds & LVDS_CLKB_POWER_UP)
- clock.p2 = 7;
- else
- clock.p2 = 14;
- } else {
- if (dpll & PLL_P1_DIVIDE_BY_TWO)
- clock.p1 = 2;
- else {
- clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
- DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
- }
- if (dpll & PLL_P2_DIVIDE_BY_4)
- clock.p2 = 4;
- else
- clock.p2 = 2;
- }
-
- port_clock = i9xx_calc_dpll_params(refclk, &clock);
- }
-
- /*
- * This value includes pixel_multiplier. We will use
- * port_clock to compute adjusted_mode.crtc_clock in the
- * encoder's get_config() function.
- */
- pipe_config->port_clock = port_clock;
-}
-
int intel_dotclock_calculate(int link_freq,
const struct intel_link_m_n *m_n)
{
/*
- * The calculation for the data clock is:
+ * The calculation for the data clock -> pixel clock is:
* pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
* But we want to avoid losing precision if possible, so:
* pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
*
- * and the link clock is simpler:
- * link_clock = (m * link_clock) / n
+ * and for link freq (in 10 kbit/s units) -> pixel clock it is:
+ * link_symbol_clock = link_freq * 10 / link_symbol_size
+ * pixel_clock = (m * link_symbol_clock) / n
+ * or for more precision:
+ * pixel_clock = (m * link_freq * 10) / (n * link_symbol_size)
*/
if (!m_n->link_n)
return 0;
- return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq),
- m_n->link_n);
+ return DIV_ROUND_UP_ULL(mul_u32_u32(m_n->link_m, link_freq * 10),
+ m_n->link_n * intel_dp_link_symbol_size(link_freq));
}
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config)
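
As a quick cross-check of the reworked intel_dotclock_calculate() above (a sketch, assuming intel_dp_link_symbol_size() reports 10-bit symbols for 8b/10b link rates): the factor of 10 cancels for such links, so the result matches the formula this hunk removes, and only 128b/132b links see a different divisor.

   pixel_clock = (link_m * link_freq * 10) / (link_n * 10)   /* 8b/10b: 10-bit symbols */
               = (link_m * link_freq) / link_n               /* == the old computation */
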
@@ -4691,6 +4582,7 @@ intel_modeset_pipe_config(struct intel_atomic_state *state,
if (ret)
return ret;
+ crtc_state->fec_enable = limits->force_fec_pipes & BIT(crtc->pipe);
crtc_state->max_link_bpp_x16 = limits->max_bpp_x16[crtc->pipe];
if (crtc_state->pipe_bpp > to_bpp_int(crtc_state->max_link_bpp_x16)) {
@@ -5031,6 +4923,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_X(name) do { \
if (current_config->name != pipe_config->name) { \
+ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
+ __stringify(name) " is bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name, \
@@ -5041,6 +4935,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_X_WITH_MASK(name, mask) do { \
if ((current_config->name & (mask)) != (pipe_config->name & (mask))) { \
+ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
+ __stringify(name) " is bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected 0x%08x, found 0x%08x)", \
current_config->name & (mask), \
@@ -5051,6 +4947,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_I(name) do { \
if (current_config->name != pipe_config->name) { \
+ BUILD_BUG_ON_MSG(__same_type(current_config->name, bool), \
+ __stringify(name) " is bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected %i, found %i)", \
current_config->name, \
@@ -5061,6 +4959,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_CHECK_BOOL(name) do { \
if (current_config->name != pipe_config->name) { \
+ BUILD_BUG_ON_MSG(!__same_type(current_config->name, bool), \
+ __stringify(name) " is not bool"); \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
"(expected %s, found %s)", \
str_yes_no(current_config->name), \
@@ -5069,23 +4969,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
} \
} while (0)
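
The __same_type() assertions added to these macros turn a mismatched macro choice into a build failure instead of a confusing state dump. A minimal illustration, mirroring the hw.enable/hw.active conversion done further down in this patch:

   PIPE_CONF_CHECK_I(hw.active);    /* hw.active is bool -> BUILD_BUG_ON_MSG: "hw.active is bool" */
   PIPE_CONF_CHECK_BOOL(hw.active); /* builds: the bool macro expects a bool member */
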
-/*
- * Checks state where we only read out the enabling, but not the entire
- * state itself (like full infoframes or ELD for audio). These states
- * require a full modeset on bootup to fix up.
- */
-#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
- if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
- PIPE_CONF_CHECK_BOOL(name); \
- } else { \
- pipe_config_mismatch(fastset, crtc, __stringify(name), \
- "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \
- str_yes_no(current_config->name), \
- str_yes_no(pipe_config->name)); \
- ret = false; \
- } \
-} while (0)
-
#define PIPE_CONF_CHECK_P(name) do { \
if (current_config->name != pipe_config->name) { \
pipe_config_mismatch(fastset, crtc, __stringify(name), \
@@ -5216,8 +5099,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#define PIPE_CONF_QUIRK(quirk) \
((current_config->quirks | pipe_config->quirks) & (quirk))
- PIPE_CONF_CHECK_I(hw.enable);
- PIPE_CONF_CHECK_I(hw.active);
+ PIPE_CONF_CHECK_BOOL(hw.enable);
+ PIPE_CONF_CHECK_BOOL(hw.active);
PIPE_CONF_CHECK_I(cpu_transcoder);
PIPE_CONF_CHECK_I(mst_master_transcoder);
@@ -5273,8 +5156,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(enhanced_framing);
PIPE_CONF_CHECK_BOOL(fec_enable);
- PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
- PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
+ if (!fastset) {
+ PIPE_CONF_CHECK_BOOL(has_audio);
+ PIPE_CONF_CHECK_BUFFER(eld, MAX_ELD_BYTES);
+ }
PIPE_CONF_CHECK_X(gmch_pfit.control);
/* pfit ratios are autocomputed by the hw on gen4+ */
@@ -5424,9 +5309,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.config.second_line_bpg_offset);
PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
- PIPE_CONF_CHECK_I(dsc.compression_enable);
- PIPE_CONF_CHECK_I(dsc.dsc_split);
- PIPE_CONF_CHECK_I(dsc.compressed_bpp);
+ PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
+ PIPE_CONF_CHECK_BOOL(dsc.dsc_split);
+ PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
PIPE_CONF_CHECK_BOOL(splitter.enable);
PIPE_CONF_CHECK_I(splitter.link_count);
@@ -5444,7 +5329,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_BOOL
-#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_COLOR_LUT
@@ -5535,6 +5419,16 @@ int intel_modeset_pipes_in_mask_early(struct intel_atomic_state *state,
return 0;
}
+static void
+intel_crtc_flag_modeset(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->uapi.mode_changed = true;
+
+ crtc_state->update_pipe = false;
+ crtc_state->update_m_n = false;
+ crtc_state->update_lrr = false;
+}
+
/**
* intel_modeset_all_pipes_late - force a full modeset on all pipes
* @state: intel atomic state
@@ -5568,9 +5462,8 @@ int intel_modeset_all_pipes_late(struct intel_atomic_state *state,
if (ret)
return ret;
- crtc_state->update_pipe = false;
- crtc_state->update_m_n = false;
- crtc_state->update_lrr = false;
+ intel_crtc_flag_modeset(crtc_state);
+
crtc_state->update_planes |= crtc_state->active_planes;
crtc_state->async_flip_planes = 0;
crtc_state->do_async_flip = false;
@@ -5683,17 +5576,17 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta
else
new_crtc_state->uapi.mode_changed = false;
- if (intel_crtc_needs_modeset(new_crtc_state) ||
- intel_compare_link_m_n(&old_crtc_state->dp_m_n,
+ if (intel_compare_link_m_n(&old_crtc_state->dp_m_n,
&new_crtc_state->dp_m_n))
new_crtc_state->update_m_n = false;
- if (intel_crtc_needs_modeset(new_crtc_state) ||
- (old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
+ if ((old_crtc_state->hw.adjusted_mode.crtc_vtotal == new_crtc_state->hw.adjusted_mode.crtc_vtotal &&
old_crtc_state->hw.adjusted_mode.crtc_vblank_end == new_crtc_state->hw.adjusted_mode.crtc_vblank_end))
new_crtc_state->update_lrr = false;
- if (!intel_crtc_needs_modeset(new_crtc_state))
+ if (intel_crtc_needs_modeset(new_crtc_state))
+ intel_crtc_flag_modeset(new_crtc_state);
+ else
new_crtc_state->update_pipe = true;
}
@@ -6476,15 +6369,14 @@ int intel_atomic_check(struct drm_device *dev,
if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state))
continue;
+ if (intel_dp_mst_crtc_needs_modeset(state, crtc))
+ intel_crtc_flag_modeset(new_crtc_state);
+
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
enum transcoder master = new_crtc_state->mst_master_transcoder;
- if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- new_crtc_state->update_m_n = false;
- new_crtc_state->update_lrr = false;
- }
+ if (intel_cpu_transcoders_need_modeset(state, BIT(master)))
+ intel_crtc_flag_modeset(new_crtc_state);
}
if (is_trans_port_sync_mode(new_crtc_state)) {
@@ -6493,21 +6385,13 @@ int intel_atomic_check(struct drm_device *dev,
if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
trans |= BIT(new_crtc_state->master_transcoder);
- if (intel_cpu_transcoders_need_modeset(state, trans)) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- new_crtc_state->update_m_n = false;
- new_crtc_state->update_lrr = false;
- }
+ if (intel_cpu_transcoders_need_modeset(state, trans))
+ intel_crtc_flag_modeset(new_crtc_state);
}
if (new_crtc_state->bigjoiner_pipes) {
- if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes)) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- new_crtc_state->update_m_n = false;
- new_crtc_state->update_lrr = false;
- }
+ if (intel_pipes_need_modeset(state, new_crtc_state->bigjoiner_pipes))
+ intel_crtc_flag_modeset(new_crtc_state);
}
}
@@ -6528,10 +6412,6 @@ int intel_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = drm_dp_mst_atomic_check(&state->base);
- if (ret)
- goto fail;
-
ret = intel_atomic_check_planes(state);
if (ret)
goto fail;
@@ -6767,8 +6647,8 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_enable_pipe_crc(crtc);
}
-static void intel_update_crtc(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void intel_pre_update_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
@@ -6810,6 +6690,15 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_color_commit_noarm(new_crtc_state);
intel_crtc_planes_update_noarm(state, crtc);
+}
+
+static void intel_update_crtc(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *old_crtc_state =
+ intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(state, crtc);
@@ -6838,7 +6727,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
* valid pipe configuration from the BIOS we need to take care
* of enabling them on the CRTC's first fastset.
*/
- if (intel_crtc_needs_fastset(new_crtc_state) && !modeset &&
+ if (intel_crtc_needs_fastset(new_crtc_state) &&
old_crtc_state->inherited)
intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
}
@@ -6934,6 +6823,13 @@ static void intel_commit_modeset_enables(struct intel_atomic_state *state)
continue;
intel_enable_crtc(state, crtc);
+ intel_pre_update_crtc(state, crtc);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (!new_crtc_state->hw.active)
+ continue;
+
intel_update_crtc(state, crtc);
}
}
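
Condensed, the post-patch intel_commit_modeset_enables() now walks the active crtcs twice — a sketch of the resulting flow, reconstructed from this hunk (the skl variant below gets the same split):

   for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
           if (!new_crtc_state->hw.active)
                   continue;
           intel_enable_crtc(state, crtc);
           intel_pre_update_crtc(state, crtc);   /* noarm work: color commit, plane noarm updates */
   }

   for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
           if (!new_crtc_state->hw.active)
                   continue;
           intel_update_crtc(state, crtc);       /* vblank-evasion critical section and arming */
   }
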
@@ -6971,6 +6867,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
* So first let's enable all pipes that do not need a full modeset as
* those don't have any external dependency.
*/
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((update_pipes & BIT(pipe)) == 0)
+ continue;
+
+ intel_pre_update_crtc(state, crtc);
+ }
+
while (update_pipes) {
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
@@ -7047,6 +6952,15 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((update_pipes & BIT(pipe)) == 0)
continue;
+ intel_pre_update_crtc(state, crtc);
+ }
+
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
+ if ((update_pipes & BIT(pipe)) == 0)
+ continue;
+
drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
entries, I915_MAX_PIPES, pipe));
@@ -7060,49 +6974,24 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
drm_WARN_ON(&dev_priv->drm, update_pipes);
}
-static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
-{
- struct intel_atomic_state *state, *next;
- struct llist_node *freed;
-
- freed = llist_del_all(&dev_priv->display.atomic_helper.free_list);
- llist_for_each_entry_safe(state, next, freed, freed)
- drm_atomic_state_put(&state->base);
-}
-
-void intel_atomic_helper_free_state_worker(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), display.atomic_helper.free_work);
-
- intel_atomic_helper_free_state(dev_priv);
-}
-
static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
{
- struct wait_queue_entry wait_fence, wait_reset;
- struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
-
- init_wait_entry(&wait_fence, 0);
- init_wait_entry(&wait_reset, 0);
- for (;;) {
- prepare_to_wait(&intel_state->commit_ready.wait,
- &wait_fence, TASK_UNINTERRUPTIBLE);
- prepare_to_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
- I915_RESET_MODESET),
- &wait_reset, TASK_UNINTERRUPTIBLE);
-
+ struct drm_i915_private *i915 = to_i915(intel_state->base.dev);
+ struct drm_plane *plane;
+ struct drm_plane_state *new_plane_state;
+ int ret, i;
- if (i915_sw_fence_done(&intel_state->commit_ready) ||
- test_bit(I915_RESET_MODESET, &to_gt(dev_priv)->reset.flags))
- break;
+ for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) {
+ if (new_plane_state->fence) {
+ ret = dma_fence_wait_timeout(new_plane_state->fence, false,
+ i915_fence_timeout(i915));
+ if (ret <= 0)
+ break;
- schedule();
+ dma_fence_put(new_plane_state->fence);
+ new_plane_state->fence = NULL;
+ }
}
- finish_wait(&intel_state->commit_ready.wait, &wait_fence);
- finish_wait(bit_waitqueue(&to_gt(dev_priv)->reset.flags,
- I915_RESET_MODESET),
- &wait_reset);
}
static void intel_atomic_cleanup_work(struct work_struct *work)
@@ -7120,8 +7009,6 @@ static void intel_atomic_cleanup_work(struct work_struct *work)
drm_atomic_helper_cleanup_planes(&i915->drm, &state->base);
drm_atomic_helper_commit_cleanup_done(&state->base);
drm_atomic_state_put(&state->base);
-
- intel_atomic_helper_free_state(i915);
}
static void intel_atomic_prepare_plane_clear_colors(struct intel_atomic_state *state)
@@ -7394,32 +7281,6 @@ static void intel_atomic_commit_work(struct work_struct *work)
intel_atomic_commit_tail(state);
}
-static int
-intel_atomic_commit_ready(struct i915_sw_fence *fence,
- enum i915_sw_fence_notify notify)
-{
- struct intel_atomic_state *state =
- container_of(fence, struct intel_atomic_state, commit_ready);
-
- switch (notify) {
- case FENCE_COMPLETE:
- /* we do blocking waits in the worker, nothing to do here */
- break;
- case FENCE_FREE:
- {
- struct drm_i915_private *i915 = to_i915(state->base.dev);
- struct intel_atomic_helper *helper =
- &i915->display.atomic_helper;
-
- if (llist_add(&state->freed, &helper->free_list))
- queue_work(i915->unordered_wq, &helper->free_work);
- break;
- }
- }
-
- return NOTIFY_DONE;
-}
-
static void intel_atomic_track_fbs(struct intel_atomic_state *state)
{
struct intel_plane_state *old_plane_state, *new_plane_state;
@@ -7442,10 +7303,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- drm_atomic_state_get(&state->base);
- i915_sw_fence_init(&state->commit_ready,
- intel_atomic_commit_ready);
-
/*
* The intel_legacy_cursor_update() fast path takes care
* of avoiding the vblank waits for simple cursor
@@ -7478,7 +7335,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
if (ret) {
drm_dbg_atomic(&dev_priv->drm,
"Preparing state failed with %i\n", ret);
- i915_sw_fence_commit(&state->commit_ready);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
}
@@ -7494,8 +7350,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
struct intel_crtc *crtc;
int i;
- i915_sw_fence_commit(&state->commit_ready);
-
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
intel_color_cleanup_commit(new_crtc_state);
@@ -7509,7 +7363,6 @@ int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state,
drm_atomic_state_get(&state->base);
INIT_WORK(&state->base.commit_work, intel_atomic_commit_work);
- i915_sw_fence_commit(&state->commit_ready);
if (nonblock && state->modeset) {
queue_work(dev_priv->display.wq.modeset, &state->base.commit_work);
} else if (nonblock) {
@@ -7909,7 +7762,7 @@ enum drm_mode_status intel_cpu_transcoder_mode_valid(struct drm_i915_private *de
* Cantiga+ cannot handle modes with a hsync front porch of 0.
* WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
*/
- if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+ if ((DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) &&
mode->hsync_start == mode->hdisplay)
return MODE_H_ILLEGAL;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index a05c7e2b78..f4a0773f0f 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -105,7 +105,6 @@ enum i9xx_plane_id {
};
#define plane_name(p) ((p) + 'A')
-#define sprite_name(p, s) ((p) * DISPLAY_RUNTIME_INFO(dev_priv)->num_sprites[(p)] + (s) + 'A')
#define for_each_plane_id_on_crtc(__crtc, __p) \
for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \
@@ -395,8 +394,8 @@ u8 intel_calc_active_pipes(struct intel_atomic_state *state,
u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
- struct intel_link_m_n *m_n,
- bool fec_enable);
+ int bw_overhead,
+ struct intel_link_m_n *m_n);
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier);
enum drm_mode_status
@@ -485,8 +484,6 @@ void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc,
void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
enum transcoder cpu_transcoder,
struct intel_link_m_n *m_n);
-void i9xx_crtc_clock_get(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config);
enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
@@ -555,7 +552,7 @@ bool assert_port_valid(struct drm_i915_private *i915, enum port port);
struct drm_device *drm = &(__i915)->drm; \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
- if (!drm_WARN(drm, i915_modparams.verbose_state_checks, format)) \
+ if (!drm_WARN(drm, __i915->display.params.verbose_state_checks, format)) \
drm_err(drm, format); \
unlikely(__ret_warn_on); \
})
diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h
index ccfe27630f..47297ed858 100644
--- a/drivers/gpu/drm/i915/display/intel_display_core.h
+++ b/drivers/gpu/drm/i915/display/intel_display_core.h
@@ -19,6 +19,7 @@
#include "intel_cdclk.h"
#include "intel_display_device.h"
#include "intel_display_limits.h"
+#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_dpll_mgr.h"
#include "intel_fbc.h"
@@ -297,12 +298,6 @@ struct intel_display {
const struct intel_audio_funcs *audio;
} funcs;
- /* Grouping using anonymous structs. Keep sorted. */
- struct intel_atomic_helper {
- struct llist_head free_list;
- struct work_struct free_work;
- } atomic_helper;
-
struct {
/* backlight registers and fields in struct intel_panel */
struct mutex lock;
@@ -348,15 +343,6 @@ struct intel_display {
} dbuf;
struct {
- wait_queue_head_t waitqueue;
-
- /* mutex to protect pmdemand programming sequence */
- struct mutex lock;
-
- struct intel_global_obj obj;
- } pmdemand;
-
- struct {
/*
* dkl.phy_lock protects against concurrent access of the
* Dekel TypeC PHYs.
@@ -444,6 +430,15 @@ struct intel_display {
} ips;
struct {
+ wait_queue_head_t waitqueue;
+
+ /* mutex to protect pmdemand programming sequence */
+ struct mutex lock;
+
+ struct intel_global_obj obj;
+ } pmdemand;
+
+ struct {
struct i915_power_domains domains;
/* Shadow for DISPLAY_PHY_CONTROL which can't be safely read */
@@ -520,6 +515,7 @@ struct intel_display {
struct intel_hotplug hotplug;
struct intel_opregion opregion;
struct intel_overlay *overlay;
+ struct intel_display_params params;
struct intel_vbt_data vbt;
struct intel_wm wm;
};
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 2836826f8c..d951edb366 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -17,6 +17,7 @@
#include "intel_de.h"
#include "intel_crtc_state_dump.h"
#include "intel_display_debugfs.h"
+#include "intel_display_debugfs_params.h"
#include "intel_display_power.h"
#include "intel_display_power_well.h"
#include "intel_display_types.h"
@@ -641,6 +642,17 @@ static int i915_display_info(struct seq_file *m, void *unused)
return 0;
}
+static int i915_display_capabilities(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_display_device_info_print(DISPLAY_INFO(i915),
+ DISPLAY_RUNTIME_INFO(i915), &p);
+
+ return 0;
+}
+
static int i915_shared_dplls_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1059,6 +1071,7 @@ static const struct drm_info_list intel_display_debugfs_list[] = {
{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_power_domain_info", i915_power_domain_info, 0},
{"i915_display_info", i915_display_info, 0},
+ {"i915_display_capabilities", i915_display_capabilities, 0},
{"i915_shared_dplls_info", i915_shared_dplls_info, 0},
{"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_ddb_info", i915_ddb_info, 0},
@@ -1082,7 +1095,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
debugfs_create_file(intel_display_debugfs_files[i].name,
- S_IRUGO | S_IWUSR,
+ 0644,
minor->debugfs_root,
to_i915(minor->dev),
intel_display_debugfs_files[i].fops);
@@ -1098,15 +1111,15 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_hpd_debugfs_register(i915);
intel_psr_debugfs_register(i915);
intel_wm_debugfs_register(i915);
+ intel_display_debugfs_params(i915);
}
static int i915_panel_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- intel_attached_dp(to_intel_connector(connector));
+ struct intel_connector *connector = m->private;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return -ENODEV;
seq_printf(m, "Panel power up delay: %d\n",
@@ -1124,23 +1137,23 @@ DEFINE_SHOW_ATTRIBUTE(i915_panel);
static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;
- if (!connector->encoder || connector->status != connector_status_connected) {
+ if (!connector->base.encoder ||
+ connector->base.status != connector_status_connected) {
ret = -ENODEV;
goto out;
}
- seq_printf(m, "%s:%d HDCP version: ", connector->name,
- connector->base.id);
- intel_hdcp_info(m, intel_connector);
+ seq_printf(m, "%s:%d HDCP version: ", connector->base.name,
+ connector->base.base.id);
+ intel_hdcp_info(m, connector);
out:
drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
@@ -1151,16 +1164,16 @@ DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_i915_private *i915 = to_i915(connector->dev);
- struct intel_encoder *encoder;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ int connector_type = connector->base.connector_type;
bool lpsp_capable = false;
- encoder = intel_attached_encoder(to_intel_connector(connector));
if (!encoder)
return -ENODEV;
- if (connector->status != connector_status_connected)
+ if (connector->base.status != connector_status_connected)
return -ENODEV;
if (DISPLAY_VER(i915) >= 13)
@@ -1173,15 +1186,15 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
*/
lpsp_capable = encoder->port <= PORT_B;
else if (DISPLAY_VER(i915) == 11)
- lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+ lpsp_capable = (connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector_type == DRM_MODE_CONNECTOR_eDP);
else if (IS_DISPLAY_VER(i915, 9, 10))
lpsp_capable = (encoder->port == PORT_A &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
+ (connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort));
else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
- lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;
+ lpsp_capable = connector_type == DRM_MODE_CONNECTOR_eDP;
seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
@@ -1191,7 +1204,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_lpsp_capability);
static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
{
- struct intel_connector *connector = to_intel_connector(m->private);
+ struct intel_connector *connector = m->private;
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct drm_crtc *crtc;
struct intel_dp *intel_dp;
@@ -1242,6 +1255,8 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
DP_DSC_YCbCr420_Native)),
str_yes_no(drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd,
DP_DSC_YCbCr444)));
+ seq_printf(m, "DSC_Sink_BPP_Precision: %d\n",
+ drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd));
seq_printf(m, "Force_DSC_Enable: %s\n",
str_yes_no(intel_dp->force_dsc_en));
if (!intel_dp_is_edp(intel_dp))
@@ -1259,13 +1274,13 @@ static ssize_t i915_dsc_fec_support_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bool dsc_enable = false;
int ret;
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (len == 0)
return 0;
@@ -1303,22 +1318,22 @@ static const struct file_operations i915_dsc_fec_support_fops = {
static int i915_dsc_bpc_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
int ret;
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;
- crtc = connector->state->crtc;
- if (connector->status != connector_status_connected || !crtc) {
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc) {
ret = -ENODEV;
goto out;
}
@@ -1326,7 +1341,7 @@ static int i915_dsc_bpc_show(struct seq_file *m, void *data)
crtc_state = to_intel_crtc_state(crtc->state);
seq_printf(m, "Input_BPC: %d\n", crtc_state->dsc.config.bits_per_component);
-out: drm_modeset_unlock(&dev->mode_config.connection_mutex);
+out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
@@ -1335,9 +1350,9 @@ static ssize_t i915_dsc_bpc_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int dsc_bpc = 0;
int ret;
@@ -1369,22 +1384,22 @@ static const struct file_operations i915_dsc_bpc_fops = {
static int i915_dsc_output_format_show(struct seq_file *m, void *data)
{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct drm_crtc *crtc;
struct intel_crtc_state *crtc_state;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
int ret;
if (!encoder)
return -ENODEV;
- ret = drm_modeset_lock_single_interruptible(&dev->mode_config.connection_mutex);
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
if (ret)
return ret;
- crtc = connector->state->crtc;
- if (connector->status != connector_status_connected || !crtc) {
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc) {
ret = -ENODEV;
goto out;
}
@@ -1393,7 +1408,7 @@ static int i915_dsc_output_format_show(struct seq_file *m, void *data)
seq_printf(m, "DSC_Output_Format: %s\n",
intel_output_format_name(crtc_state->output_format));
-out: drm_modeset_unlock(&dev->mode_config.connection_mutex);
+out: drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
return ret;
}
@@ -1402,9 +1417,9 @@ static ssize_t i915_dsc_output_format_write(struct file *file,
const char __user *ubuf,
size_t len, loff_t *offp)
{
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int dsc_output_format = 0;
int ret;
@@ -1434,6 +1449,84 @@ static const struct file_operations i915_dsc_output_format_fops = {
.write = i915_dsc_output_format_write
};
+static int i915_dsc_fractional_bpp_show(struct seq_file *m, void *data)
+{
+ struct intel_connector *connector = m->private;
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_crtc *crtc;
+ struct intel_dp *intel_dp;
+ int ret;
+
+ if (!encoder)
+ return -ENODEV;
+
+ ret = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ if (ret)
+ return ret;
+
+ crtc = connector->base.state->crtc;
+ if (connector->base.status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ intel_dp = intel_attached_dp(connector);
+ seq_printf(m, "Force_DSC_Fractional_BPP_Enable: %s\n",
+ str_yes_no(intel_dp->force_dsc_fractional_bpp_en));
+
+out:
+ drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+
+ return ret;
+}
+
+static ssize_t i915_dsc_fractional_bpp_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct intel_connector *connector = m->private;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ bool dsc_fractional_bpp_enable = false;
+ int ret;
+
+ if (len == 0)
+ return 0;
+
+ drm_dbg(&i915->drm,
+ "Copied %zu bytes from user to force fractional bpp for DSC\n", len);
+
+ ret = kstrtobool_from_user(ubuf, len, &dsc_fractional_bpp_enable);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg(&i915->drm, "Got %s for DSC Fractional BPP Enable\n",
+ (dsc_fractional_bpp_enable) ? "true" : "false");
+ intel_dp->force_dsc_fractional_bpp_en = dsc_fractional_bpp_enable;
+
+ *offp += len;
+
+ return len;
+}
+
+static int i915_dsc_fractional_bpp_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_fractional_bpp_show, inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fractional_bpp_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_fractional_bpp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_fractional_bpp_write
+};
+
/*
* Returns the Current CRTC's bpc.
* Example usage: cat /sys/kernel/debug/dri/0/crtc-0/i915_current_bpc
@@ -1470,39 +1563,38 @@ DEFINE_SHOW_ATTRIBUTE(intel_crtc_pipe);
/**
* intel_connector_debugfs_add - add i915 specific connector debugfs files
- * @intel_connector: pointer to a registered drm_connector
+ * @connector: pointer to a registered intel_connector
*
* Cleanup will be done by drm_connector_unregister() through a call to
* drm_debugfs_connector_remove().
*/
-void intel_connector_debugfs_add(struct intel_connector *intel_connector)
+void intel_connector_debugfs_add(struct intel_connector *connector)
{
- struct drm_connector *connector = &intel_connector->base;
- struct dentry *root = connector->debugfs_entry;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct dentry *root = connector->base.debugfs_entry;
+ int connector_type = connector->base.connector_type;
/* The connector must have been registered beforehand. */
if (!root)
return;
- intel_drrs_connector_debugfs_add(intel_connector);
- intel_psr_connector_debugfs_add(intel_connector);
+ intel_drrs_connector_debugfs_add(connector);
+ intel_psr_connector_debugfs_add(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+ if (connector_type == DRM_MODE_CONNECTOR_eDP)
+ debugfs_create_file("i915_panel_timings", 0444, root,
connector, &i915_panel_fops);
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+ if (connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", 0444, root,
connector, &i915_hdcp_sink_capability_fops);
}
- if (DISPLAY_VER(dev_priv) >= 11 &&
- ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
- !to_intel_connector(connector)->mst_port) ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+ if (DISPLAY_VER(i915) >= 11 &&
+ ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !connector->mst_port) ||
+ connector_type == DRM_MODE_CONNECTOR_eDP)) {
debugfs_create_file("i915_dsc_fec_support", 0644, root,
connector, &i915_dsc_fec_support_fops);
@@ -1511,13 +1603,16 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector)
debugfs_create_file("i915_dsc_output_format", 0644, root,
connector, &i915_dsc_output_format_fops);
+
+ debugfs_create_file("i915_dsc_fractional_bpp", 0644, root,
+ connector, &i915_dsc_fractional_bpp_fops);
}
- if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ if (connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector_type == DRM_MODE_CONNECTOR_HDMIB)
debugfs_create_file("i915_lpsp_capability", 0444, root,
connector, &i915_lpsp_capability_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
new file mode 100644
index 0000000000..b7e68eb624
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drm_drv.h>
+
+#include "intel_display_debugfs_params.h"
+#include "i915_drv.h"
+#include "intel_display_params.h"
+
+/* int param */
+static int intel_display_param_int_show(struct seq_file *m, void *data)
+{
+ int *value = m->private;
+
+ seq_printf(m, "%d\n", *value);
+
+ return 0;
+}
+
+static int intel_display_param_int_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, intel_display_param_int_show, inode->i_private);
+}
+
+static ssize_t intel_display_param_int_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ int *value = m->private;
+ int ret;
+
+ ret = kstrtoint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
+
+static const struct file_operations intel_display_param_int_fops = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_int_open,
+ .read = seq_read,
+ .write = intel_display_param_int_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations intel_display_param_int_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_int_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+/* unsigned int param */
+static int intel_display_param_uint_show(struct seq_file *m, void *data)
+{
+ unsigned int *value = m->private;
+
+ seq_printf(m, "%u\n", *value);
+
+ return 0;
+}
+
+static int intel_display_param_uint_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, intel_display_param_uint_show, inode->i_private);
+}
+
+static ssize_t intel_display_param_uint_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ unsigned int *value = m->private;
+ int ret;
+
+ ret = kstrtouint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
+
+static const struct file_operations intel_display_param_uint_fops = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_uint_open,
+ .read = seq_read,
+ .write = intel_display_param_uint_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations intel_display_param_uint_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = intel_display_param_uint_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+#define RO(mode) (((mode) & 0222) == 0)
+
+__maybe_unused static struct dentry *
+intel_display_debugfs_create_int(const char *name, umode_t mode,
+ struct dentry *parent, int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &intel_display_param_int_fops_ro :
+ &intel_display_param_int_fops);
+}
+
+__maybe_unused static struct dentry *
+intel_display_debugfs_create_uint(const char *name, umode_t mode,
+ struct dentry *parent, unsigned int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &intel_display_param_uint_fops_ro :
+ &intel_display_param_uint_fops);
+}
+
+#define _intel_display_param_create_file(parent, name, mode, valp) \
+ do { \
+ if (mode) \
+ _Generic(valp, \
+ bool * : debugfs_create_bool, \
+ int * : intel_display_debugfs_create_int, \
+ unsigned int * : intel_display_debugfs_create_uint, \
+ unsigned long * : debugfs_create_ulong, \
+ char ** : debugfs_create_str) \
+ (name, mode, parent, valp); \
+ } while (0)
+
+/* add a subdirectory with files for each intel display param */
+void intel_display_debugfs_params(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ struct dentry *dir;
+ char dirname[16];
+
+ snprintf(dirname, sizeof(dirname), "%s_params", i915->drm.driver->name);
+ dir = debugfs_lookup(dirname, minor->debugfs_root);
+ if (!dir)
+ dir = debugfs_create_dir(dirname, minor->debugfs_root);
+ if (IS_ERR(dir))
+ return;
+
+ /*
+ * Note: We could create files for params needing special handling
+ * here. Set mode in params to 0 to skip the generic create file, or
+ * just let the generic create file fail silently with -EEXIST.
+ */
+
+#define REGISTER(T, x, unused, mode, ...) _intel_display_param_create_file( \
+ dir, #x, mode, &i915->display.params.x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(REGISTER);
+#undef REGISTER
+}
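
For orientation, a sketch of what one REGISTER() invocation above expands to; the parameter name is hypothetical, the real list comes from INTEL_DISPLAY_PARAMS_FOR_EACH(), and a mode of 0 skips file creation entirely:

   /* REGISTER(bool, enable_foo, <default>, 0400, ...) becomes: */
   _intel_display_param_create_file(dir, "enable_foo", 0400, &i915->display.params.enable_foo);
   /* ...and _Generic(), seeing a bool *, resolves that to: */
   debugfs_create_bool("enable_foo", 0400, dir, &i915->display.params.enable_foo);
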
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
new file mode 100644
index 0000000000..1e9945a404
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_DEBUGFS_PARAMS__
+#define __INTEL_DISPLAY_DEBUGFS_PARAMS__
+
+struct drm_i915_private;
+
+void intel_display_debugfs_params(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DISPLAY_DEBUGFS_PARAMS__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index 2b1ec23ba9..0b522c6a8d 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -12,6 +12,7 @@
#include "intel_de.h"
#include "intel_display.h"
#include "intel_display_device.h"
+#include "intel_display_params.h"
#include "intel_display_power.h"
#include "intel_display_reg_defs.h"
#include "intel_fbc.h"
@@ -937,6 +938,13 @@ void intel_display_device_probe(struct drm_i915_private *i915)
DISPLAY_RUNTIME_INFO(i915)->ip.rel = rel;
DISPLAY_RUNTIME_INFO(i915)->ip.step = step;
}
+
+ intel_display_params_copy(&i915->display.params);
+}
+
+void intel_display_device_remove(struct drm_i915_private *i915)
+{
+ intel_display_params_free(&i915->display.params);
}
static void __intel_display_device_info_runtime_init(struct drm_i915_private *i915)
@@ -1105,7 +1113,7 @@ void intel_display_device_info_runtime_init(struct drm_i915_private *i915)
}
/* Disable nuclear pageflip by default on pre-g4x */
- if (!i915->params.nuclear_pageflip &&
+ if (!i915->display.params.nuclear_pageflip &&
DISPLAY_VER(i915) < 5 && !IS_G4X(i915))
i915->drm.driver_features &= ~DRIVER_ATOMIC;
}
@@ -1145,5 +1153,6 @@ bool intel_display_device_enabled(struct drm_i915_private *i915)
/* Only valid when HAS_DISPLAY() is true */
drm_WARN_ON(&i915->drm, !HAS_DISPLAY(i915));
- return !i915->params.disable_display && !intel_opregion_headless_sku(i915);
+ return !i915->display.params.disable_display &&
+ !intel_opregion_headless_sku(i915);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 5b5c0e5330..9b1bce2624 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -36,7 +36,7 @@ struct drm_printer;
#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
#define HAS_CDCLK_CRAWL(i915) (DISPLAY_INFO(i915)->has_cdclk_crawl)
#define HAS_CDCLK_SQUASH(i915) (DISPLAY_INFO(i915)->has_cdclk_squash)
-#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && DISPLAY_VER(i915) >= 7)
+#define HAS_CUR_FBC(i915) (!HAS_GMCH(i915) && IS_DISPLAY_VER(i915, 7, 13))
#define HAS_D12_PLANE_MINIMIZATION(i915) (IS_ROCKETLAKE(i915) || IS_ALDERLAKE_S(i915))
#define HAS_DDI(i915) (DISPLAY_INFO(i915)->has_ddi)
#define HAS_DISPLAY(i915) (DISPLAY_RUNTIME_INFO(i915)->pipe_mask != 0)
@@ -47,9 +47,10 @@ struct drm_printer;
#define HAS_DPT(i915) (DISPLAY_VER(i915) >= 13)
#define HAS_DSB(i915) (DISPLAY_INFO(i915)->has_dsb)
#define HAS_DSC(__i915) (DISPLAY_RUNTIME_INFO(__i915)->has_dsc)
+#define HAS_DSC_MST(__i915) (DISPLAY_VER(__i915) >= 12 && HAS_DSC(__i915))
#define HAS_FBC(i915) (DISPLAY_RUNTIME_INFO(i915)->fbc_mask != 0)
#define HAS_FPGA_DBG_UNCLAIMED(i915) (DISPLAY_INFO(i915)->has_fpga_dbg)
-#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) > 2)
+#define HAS_FW_BLC(i915) (DISPLAY_VER(i915) >= 3)
#define HAS_GMBUS_IRQ(i915) (DISPLAY_VER(i915) >= 4)
#define HAS_GMBUS_BURST_READ(i915) (DISPLAY_VER(i915) >= 10 || IS_KABYLAKE(i915))
#define HAS_GMCH(i915) (DISPLAY_INFO(i915)->has_gmch)
@@ -161,6 +162,7 @@ struct intel_display_device_info {
bool intel_display_device_enabled(struct drm_i915_private *i915);
void intel_display_device_probe(struct drm_i915_private *i915);
+void intel_display_device_remove(struct drm_i915_private *i915);
void intel_display_device_info_runtime_init(struct drm_i915_private *i915);
void intel_display_device_info_print(const struct intel_display_device_info *info,
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 44b59ac301..9df9097a02 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -181,6 +181,13 @@ void intel_display_driver_early_probe(struct drm_i915_private *i915)
if (!HAS_DISPLAY(i915))
return;
+ spin_lock_init(&i915->display.fb_tracking.lock);
+ mutex_init(&i915->display.backlight.lock);
+ mutex_init(&i915->display.audio.mutex);
+ mutex_init(&i915->display.wm.wm_mutex);
+ mutex_init(&i915->display.pps.mutex);
+ mutex_init(&i915->display.hdcp.hdcp_mutex);
+
intel_display_irq_init(i915);
intel_dkl_phy_init(i915);
intel_color_init_hooks(i915);
@@ -252,10 +259,6 @@ int intel_display_driver_probe_noirq(struct drm_i915_private *i915)
if (ret)
goto cleanup_vga_client_pw_domain_dmc;
- init_llist_head(&i915->display.atomic_helper.free_list);
- INIT_WORK(&i915->display.atomic_helper.free_work,
- intel_atomic_helper_free_state_worker);
-
intel_init_quirks(i915);
intel_fbc_init(i915);
@@ -423,9 +426,6 @@ void intel_display_driver_remove(struct drm_i915_private *i915)
flush_workqueue(i915->display.wq.flip);
flush_workqueue(i915->display.wq.modeset);
- flush_work(&i915->display.atomic_helper.free_work);
- drm_WARN_ON(&i915->drm, !llist_empty(&i915->display.atomic_helper.free_list));
-
/*
* MST topology needs to be suspended so we don't have any calls to
* fbdev after it's finalized. MST will be destroyed later as part of
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index bff4a76310..a7d8f3fc98 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -340,18 +340,15 @@ static void flip_done_handler(struct drm_i915_private *i915,
enum pipe pipe)
{
struct intel_crtc *crtc = intel_crtc_for_pipe(i915, pipe);
- struct drm_crtc_state *crtc_state = crtc->base.state;
- struct drm_pending_vblank_event *e = crtc_state->event;
- struct drm_device *dev = &i915->drm;
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev->event_lock, irqflags);
- crtc_state->event = NULL;
+ spin_lock(&i915->drm.event_lock);
- drm_crtc_send_vblank_event(&crtc->base, e);
+ if (crtc->flip_done_event) {
+ drm_crtc_send_vblank_event(&crtc->base, crtc->flip_done_event);
+ crtc->flip_done_event = NULL;
+ }
- spin_unlock_irqrestore(&dev->event_lock, irqflags);
+ spin_unlock(&i915->drm.event_lock);
}
static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
@@ -896,7 +893,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (!found)
- drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
+ drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt: 0x%08x\n", iir);
}
static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv,
@@ -1653,7 +1650,7 @@ void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
else if (HAS_PCH_SPLIT(dev_priv))
ibx_irq_postinstall(dev_priv);
- if (DISPLAY_VER(dev_priv) <= 10)
+ if (DISPLAY_VER(dev_priv) < 11)
de_misc_masked |= GEN8_DE_MISC_GSE;
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c
new file mode 100644
index 0000000000..11e03cfb77
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_params.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "intel_display_params.h"
+#include "i915_drv.h"
+
+#define intel_display_param_named(name, T, perm, desc) \
+ module_param_named(name, intel_display_modparams.name, T, perm); \
+ MODULE_PARM_DESC(name, desc)
+#define intel_display_param_named_unsafe(name, T, perm, desc) \
+ module_param_named_unsafe(name, intel_display_modparams.name, T, perm); \
+ MODULE_PARM_DESC(name, desc)
+
+static struct intel_display_params intel_display_modparams __read_mostly = {
+#define MEMBER(T, member, value, ...) .member = (value),
+ INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER)
+#undef MEMBER
+};
+/*
+ * Note: As a rule, keep module parameter sysfs permissions read-only
+ * 0400. Runtime changes are only supported through i915 debugfs.
+ *
+ * For any exceptions requiring write access and runtime changes through module
+ * parameter sysfs, prevent debugfs file creation by setting the parameter's
+ * debugfs mode to 0.
+ */
+
+intel_display_param_named_unsafe(vbt_firmware, charp, 0400,
+ "Load VBT from specified file under /lib/firmware");
+
+intel_display_param_named_unsafe(lvds_channel_mode, int, 0400,
+ "Specify LVDS channel mode "
+ "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
+
+intel_display_param_named_unsafe(panel_use_ssc, int, 0400,
+ "Use Spread Spectrum Clock with panels [LVDS/eDP] "
+ "(default: auto from VBT)");
+
+intel_display_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
+ "Override/Ignore selection of SDVO panel mode in the VBT "
+ "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
+
+intel_display_param_named_unsafe(enable_dc, int, 0400,
+ "Enable power-saving display C-states. "
+ "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
+ "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
+
+intel_display_param_named_unsafe(enable_dpt, bool, 0400,
+ "Enable display page table (DPT) (default: true)");
+
+intel_display_param_named_unsafe(enable_sagv, bool, 0400,
+ "Enable system agent voltage/frequency scaling (SAGV) (default: true)");
+
+intel_display_param_named_unsafe(disable_power_well, int, 0400,
+ "Disable display power wells when possible "
+ "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
+
+intel_display_param_named_unsafe(enable_ips, bool, 0400, "Enable IPS (default: true)");
+
+intel_display_param_named_unsafe(invert_brightness, int, 0400,
+ "Invert backlight brightness "
+ "(-1 force normal, 0 machine defaults, 1 force inversion), please "
+ "report PCI device ID, subsystem vendor and subsystem device ID "
+ "to dri-devel@lists.freedesktop.org, if your machine needs it. "
+ "It will then be included in an upcoming module version.");
+
+/* WA to get away with the default setting in VBT for early platforms. Will be removed */
+intel_display_param_named_unsafe(edp_vswing, int, 0400,
+ "Ignore/Override vswing pre-emph table selection from VBT "
+ "(0=use value from vbt [default], 1=low power swing(200mV),"
+ "2=default swing(400mV))");
+
+intel_display_param_named(enable_dpcd_backlight, int, 0400,
+ "Enable support for DPCD backlight control"
+ "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
+
+intel_display_param_named_unsafe(load_detect_test, bool, 0400,
+ "Force-enable the VGA load detect code for testing (default:false). "
+ "For developers only.");
+
+intel_display_param_named_unsafe(force_reset_modeset_test, bool, 0400,
+ "Force a modeset during gpu reset for testing (default:false). "
+ "For developers only.");
+
+intel_display_param_named(disable_display, bool, 0400,
+ "Disable display (default: false)");
+
+intel_display_param_named(verbose_state_checks, bool, 0400,
+ "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
+
+intel_display_param_named_unsafe(nuclear_pageflip, bool, 0400,
+ "Force enable atomic functionality on platforms that don't have full support yet.");
+
+intel_display_param_named_unsafe(enable_dp_mst, bool, 0400,
+ "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
+
+intel_display_param_named_unsafe(enable_fbc, int, 0400,
+ "Enable frame buffer compression for power savings "
+ "(default: -1 (use per-chip default))");
+
+intel_display_param_named_unsafe(enable_psr, int, 0400,
+ "Enable PSR "
+ "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
+ "Default: -1 (use per-chip default)");
+
+intel_display_param_named(psr_safest_params, bool, 0400,
+ "Replace PSR VBT parameters by the safest and not optimal ones. This "
+ "is helpful to detect if PSR issues are related to bad values set in "
+ " VBT. (0=use VBT parameters, 1=use safest parameters)"
+ "Default: 0");
+
+intel_display_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
+ "Enable PSR2 selective fetch "
+ "(0=disabled, 1=enabled) "
+ "Default: 1");
+
+__maybe_unused
+static void _param_print_bool(struct drm_printer *p, const char *driver_name,
+ const char *name, bool val)
+{
+ drm_printf(p, "%s.%s=%s\n", driver_name, name, str_yes_no(val));
+}
+
+__maybe_unused
+static void _param_print_int(struct drm_printer *p, const char *driver_name,
+ const char *name, int val)
+{
+ drm_printf(p, "%s.%s=%d\n", driver_name, name, val);
+}
+
+__maybe_unused
+static void _param_print_uint(struct drm_printer *p, const char *driver_name,
+ const char *name, unsigned int val)
+{
+ drm_printf(p, "%s.%s=%u\n", driver_name, name, val);
+}
+
+__maybe_unused
+static void _param_print_ulong(struct drm_printer *p, const char *driver_name,
+ const char *name, unsigned long val)
+{
+ drm_printf(p, "%s.%s=%lu\n", driver_name, name, val);
+}
+
+__maybe_unused
+static void _param_print_charp(struct drm_printer *p, const char *driver_name,
+ const char *name, const char *val)
+{
+ drm_printf(p, "%s.%s=%s\n", driver_name, name, val);
+}
+
+#define _param_print(p, driver_name, name, val) \
+ _Generic(val, \
+ bool : _param_print_bool, \
+ int : _param_print_int, \
+ unsigned int : _param_print_uint, \
+ unsigned long : _param_print_ulong, \
+ char * : _param_print_charp)(p, driver_name, name, val)
+
+/**
+ * intel_display_params_dump - dump intel display modparams
+ * @i915: i915 device
+ * @p: the &drm_printer
+ *
+ * Pretty printer for the i915 display params.
+ */
+void intel_display_params_dump(struct drm_i915_private *i915, struct drm_printer *p)
+{
+#define PRINT(T, x, ...) _param_print(p, i915->drm.driver->name, #x, i915->display.params.x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(PRINT);
+#undef PRINT
+}
+
+__maybe_unused static void _param_dup_charp(char **valp)
+{
+ *valp = kstrdup(*valp ? *valp : "", GFP_ATOMIC);
+}
+
+__maybe_unused static void _param_nop(void *valp)
+{
+}
+
+#define _param_dup(valp) \
+ _Generic(valp, \
+ char ** : _param_dup_charp, \
+ default : _param_nop) \
+ (valp)
+
+void intel_display_params_copy(struct intel_display_params *dest)
+{
+ *dest = intel_display_modparams;
+#define DUP(T, x, ...) _param_dup(&dest->x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(DUP);
+#undef DUP
+}
+
+__maybe_unused static void _param_free_charp(char **valp)
+{
+ kfree(*valp);
+ *valp = NULL;
+}
+
+#define _param_free(valp) \
+ _Generic(valp, \
+ char ** : _param_free_charp, \
+ default : _param_nop) \
+ (valp)
+
+/* free the allocated members, *not* the passed in params itself */
+void intel_display_params_free(struct intel_display_params *params)
+{
+#define FREE(T, x, ...) _param_free(&params->x);
+ INTEL_DISPLAY_PARAMS_FOR_EACH(FREE);
+#undef FREE
+}
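As an illustrative aside (a user-space sketch, not part of the patch): the copy/free helpers above use C11 _Generic so that only char * members get deep-copied and freed, while every other member type falls through to a no-op. The same dispatch, with strdup()/free() standing in for kstrdup()/kfree() and an invented two-member struct, looks like this:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct params {
	int enable_fbc;
	char *vbt_firmware;
};

/* Deep-copy only string members; other members keep the shallow copy. */
static void dup_charp(char **valp)
{
	*valp = strdup(*valp ? *valp : "");
}

static void free_charp(char **valp)
{
	free(*valp);
	*valp = NULL;
}

static void nop(void *valp) { (void)valp; }

#define param_dup(valp) \
	_Generic(valp, char ** : dup_charp, default : nop)(valp)
#define param_free(valp) \
	_Generic(valp, char ** : free_charp, default : nop)(valp)

int main(void)
{
	struct params defaults = { .enable_fbc = -1, .vbt_firmware = "vbt.bin" };
	struct params copy = defaults;		/* shallow copy first */

	param_dup(&copy.enable_fbc);		/* selects nop() */
	param_dup(&copy.vbt_firmware);		/* selects dup_charp() */
	printf("%d %s\n", copy.enable_fbc, copy.vbt_firmware);

	param_free(&copy.enable_fbc);
	param_free(&copy.vbt_firmware);
	return 0;
}

The kernel helpers follow the same shape; the only differences are the GFP-aware allocation and the FOR_EACH table driving the per-member calls.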
diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h
new file mode 100644
index 0000000000..6206cc51df
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_params.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_DISPLAY_PARAMS_H_
+#define _INTEL_DISPLAY_PARAMS_H_
+
+#include <linux/types.h>
+
+struct drm_printer;
+struct drm_i915_private;
+
+/*
+ * Invoke param, a function-like macro, for each intel display param, with
+ * arguments:
+ *
+ * param(type, name, value, mode)
+ *
+ * type: parameter type, one of {bool, int, unsigned int, unsigned long, char *}
+ * name: name of the parameter
+ * value: initial/default value of the parameter
+ * mode: debugfs file permissions, one of {0400, 0600, 0}, use 0 to not create
+ * debugfs file
+ */
+#define INTEL_DISPLAY_PARAMS_FOR_EACH(param) \
+ param(char *, vbt_firmware, NULL, 0400) \
+ param(int, lvds_channel_mode, 0, 0400) \
+ param(int, panel_use_ssc, -1, 0600) \
+ param(int, vbt_sdvo_panel_type, -1, 0400) \
+ param(int, enable_dc, -1, 0400) \
+ param(bool, enable_dpt, true, 0400) \
+ param(bool, enable_sagv, true, 0600) \
+ param(int, disable_power_well, -1, 0400) \
+ param(bool, enable_ips, true, 0600) \
+ param(int, invert_brightness, 0, 0600) \
+ param(int, edp_vswing, 0, 0400) \
+ param(int, enable_dpcd_backlight, -1, 0600) \
+ param(bool, load_detect_test, false, 0600) \
+ param(bool, force_reset_modeset_test, false, 0600) \
+ param(bool, disable_display, false, 0400) \
+ param(bool, verbose_state_checks, true, 0400) \
+ param(bool, nuclear_pageflip, false, 0400) \
+ param(bool, enable_dp_mst, true, 0600) \
+ param(int, enable_fbc, -1, 0600) \
+ param(int, enable_psr, -1, 0600) \
+ param(bool, psr_safest_params, false, 0400) \
+ param(bool, enable_psr2_sel_fetch, true, 0400) \
+
+#define MEMBER(T, member, ...) T member;
+struct intel_display_params {
+ INTEL_DISPLAY_PARAMS_FOR_EACH(MEMBER);
+};
+#undef MEMBER
+
+void intel_display_params_dump(struct drm_i915_private *i915,
+ struct drm_printer *p);
+void intel_display_params_copy(struct intel_display_params *dest);
+void intel_display_params_free(struct intel_display_params *params);
+
+#endif
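For reference, a cut-down user-space sketch of the X-macro technique documented above, with invented demo_* names: one table generates the struct members, the defaults and a dump loop, just as the MEMBER/DUP/FREE/PRINT expansions do in the patch.

#include <stdio.h>
#include <stdbool.h>

/* A two-entry parameter table in the same param(type, name, value, mode) style. */
#define DEMO_PARAMS_FOR_EACH(param) \
	param(bool, enable_sagv, true, 0600) \
	param(int,  enable_fbc, -1, 0600)

/* Expansion 1: struct members. */
#define MEMBER(T, member, ...) T member;
struct demo_params {
	DEMO_PARAMS_FOR_EACH(MEMBER)
};
#undef MEMBER

/* Expansion 2: default values. */
static const struct demo_params demo_defaults = {
#define DEFAULT(T, member, value, ...) .member = (value),
	DEMO_PARAMS_FOR_EACH(DEFAULT)
#undef DEFAULT
};

/* Expansion 3: one print statement per parameter. */
static void demo_dump(const struct demo_params *p)
{
#define PRINT(T, member, ...) printf("i915." #member "=%d\n", (int)p->member);
	DEMO_PARAMS_FOR_EACH(PRINT)
#undef PRINT
}

int main(void)
{
	demo_dump(&demo_defaults);
	return 0;
}

Adding a parameter to the table automatically adds the struct member, its default and its dump line, which is why the patch never lists the parameters more than once.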
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index e25785ae1c..6fd4fa5225 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -405,8 +405,8 @@ print_async_put_domains_state(struct i915_power_domains *power_domains)
struct drm_i915_private,
display.power.domains);
- drm_dbg(&i915->drm, "async_put_wakeref %u\n",
- power_domains->async_put_wakeref);
+ drm_dbg(&i915->drm, "async_put_wakeref: %s\n",
+ str_yes_no(power_domains->async_put_wakeref));
print_power_domains(power_domains, "async_put_domains[0]",
&power_domains->async_put_domains[0]);
@@ -967,7 +967,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
DISPLAY_VER(dev_priv) >= 11 ?
DC_STATE_EN_DC9 : 0;
- if (!dev_priv->params.disable_power_well)
+ if (!dev_priv->display.params.disable_power_well)
max_dc = 0;
if (enable_dc >= 0 && enable_dc <= max_dc) {
@@ -1016,11 +1016,11 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
- dev_priv->params.disable_power_well =
+ dev_priv->display.params.disable_power_well =
sanitize_disable_power_well_option(dev_priv,
- dev_priv->params.disable_power_well);
+ dev_priv->display.params.disable_power_well);
power_domains->allowed_dc_mask =
- get_allowed_dc_mask(dev_priv, dev_priv->params.enable_dc);
+ get_allowed_dc_mask(dev_priv, dev_priv->display.params.enable_dc);
power_domains->target_dc_state =
sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
@@ -1697,14 +1697,14 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
if (resume)
intel_dmc_load_program(dev_priv);
- /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p */
- if (DISPLAY_VER(dev_priv) >= 12)
+ /* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
+ if (IS_DISPLAY_IP_RANGE(dev_priv, IP_VER(12, 0), IP_VER(13, 0)))
intel_de_rmw(dev_priv, GEN11_CHICKEN_DCPR_2, 0,
DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
- if (DISPLAY_VER(dev_priv) >= 13)
+ if (DISPLAY_VER(dev_priv) == 13)
intel_de_write(dev_priv, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
}
@@ -1950,7 +1950,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
intel_display_power_get(i915, POWER_DOMAIN_INIT);
/* Disable power support if the user asked so. */
- if (!i915->params.disable_power_well) {
+ if (!i915->display.params.disable_power_well) {
drm_WARN_ON(&i915->drm, power_domains->disable_wakeref);
i915->display.power.domains.disable_wakeref = intel_display_power_get(i915,
POWER_DOMAIN_INIT);
@@ -1977,7 +1977,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915)
fetch_and_zero(&i915->display.power.domains.init_wakeref);
/* Remove the refcount we took to keep power well support disabled. */
- if (!i915->params.disable_power_well)
+ if (!i915->display.params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
fetch_and_zero(&i915->display.power.domains.disable_wakeref));
@@ -2096,7 +2096,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, bool s2idle)
* Even if power well support was disabled we still want to disable
* power wells if power domains must be deinitialized for suspend.
*/
- if (!i915->params.disable_power_well)
+ if (!i915->display.params.disable_power_well)
intel_display_power_put(i915, POWER_DOMAIN_INIT,
fetch_and_zero(&i915->display.power.domains.disable_wakeref));
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index 82dd5be72e..06900ff307 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -1411,20 +1411,16 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
{
enum i915_power_well_id id = i915_power_well_instance(power_well)->id;
enum dpio_phy phy;
- enum pipe pipe;
u32 tmp;
drm_WARN_ON_ONCE(&dev_priv->drm,
id != VLV_DISP_PW_DPIO_CMN_BC &&
id != CHV_DISP_PW_DPIO_CMN_D);
- if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- pipe = PIPE_A;
+ if (id == VLV_DISP_PW_DPIO_CMN_BC)
phy = DPIO_PHY0;
- } else {
- pipe = PIPE_C;
+ else
phy = DPIO_PHY1;
- }
/* since ref/cri clock was enabled */
udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
@@ -1439,24 +1435,24 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_get(dev_priv);
/* Enable dynamic power down */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW28);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW28);
tmp |= DPIO_DYNPWRDOWNEN_CH0 | DPIO_CL1POWERDOWNEN |
DPIO_SUS_CLK_CONFIG_GATE_CLKREQ;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW28, tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW28, tmp);
if (id == VLV_DISP_PW_DPIO_CMN_BC) {
- tmp = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW6_CH1);
+ tmp = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW6_CH1);
tmp |= DPIO_DYNPWRDOWNEN_CH1;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW6_CH1, tmp);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW6_CH1, tmp);
} else {
/*
* Force the non-existing CL2 off. BXT does this
* too, so maybe it saves some power even though
* CL2 doesn't exist?
*/
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW30);
tmp |= DPIO_CL2_LDOFUSE_PWRENB;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW30, tmp);
}
vlv_dpio_put(dev_priv);
@@ -1510,7 +1506,6 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpio_phy phy,
enum dpio_channel ch, bool override, unsigned int mask)
{
- enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C;
u32 reg, val, expected, actual;
/*
@@ -1529,7 +1524,7 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
reg = _CHV_CMN_DW6_CH1;
vlv_dpio_get(dev_priv);
- val = vlv_dpio_read(dev_priv, pipe, reg);
+ val = vlv_dpio_read(dev_priv, phy, reg);
vlv_dpio_put(dev_priv);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index 17178d5d77..c2c347b224 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -29,7 +29,7 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
return;
/* reset doesn't touch the display */
- if (!dev_priv->params.force_reset_modeset_test &&
+ if (!dev_priv->display.params.force_reset_modeset_test &&
!gpu_reset_clobbers_display(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 65ea37fe8c..9529f77b87 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -198,6 +198,12 @@ struct intel_encoder {
struct intel_encoder *,
const struct intel_crtc_state *,
const struct drm_connector_state *);
+ void (*audio_enable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ void (*audio_disable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
/* Read out the current hw state of this connector, returning true if
* the encoder is active. If the encoder is enabled it also set the pipe
* it is connected to in the pipe parameter. */
@@ -603,6 +609,13 @@ struct intel_connector {
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
+ /*
+ * Optional hook called during init/resume to sync any state
+ * stored in the connector (e.g. DSC state) w.r.t. the HW state.
+ */
+ void (*sync_state)(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state);
+
/* Panel info for eDP and LVDS */
struct intel_panel panel;
@@ -624,6 +637,9 @@ struct intel_connector {
struct drm_dp_aux *dsc_decompression_aux;
u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE];
u8 fec_capability;
+
+ u8 dsc_hblank_expansion_quirk:1;
+ u8 dsc_decompression_enabled:1;
} dp;
/* Work struct to schedule a uevent on link train failure */
@@ -675,10 +691,6 @@ struct intel_atomic_state {
bool skip_intermediate_wm;
bool rps_interactive;
-
- struct i915_sw_fence commit_ready;
-
- struct llist_node freed;
};
struct intel_plane_state {
@@ -707,6 +719,7 @@ struct intel_plane_state {
#define PLANE_HAS_FENCE BIT(0)
struct intel_fb_view view;
+ u32 phys_dma_addr; /* for cursor_needs_physical */
/* Plane pxp decryption state */
bool decrypt;
@@ -1015,7 +1028,6 @@ struct intel_c10pll_state {
};
struct intel_c20pll_state {
- u32 link_bit_rate;
u32 clock; /* in kHz */
u16 tx[3];
u16 cmn[4];
@@ -1210,6 +1222,7 @@ struct intel_crtc_state {
bool has_psr2;
bool enable_psr2_sel_fetch;
bool req_psr2_sdp_prior_scanline;
+ bool has_panel_replay;
bool wm_level_disabled;
u32 dc3co_exitline;
u16 su_y_granularity;
@@ -1361,7 +1374,8 @@ struct intel_crtc_state {
struct {
bool compression_enable;
bool dsc_split;
- u16 compressed_bpp;
+ /* Compressed Bpp in U6.4 format (low 4 bits for the fractional part) */
+ u16 compressed_bpp_x16;
u8 slice_count;
struct drm_dsc_config config;
} dsc;
@@ -1467,6 +1481,9 @@ struct intel_crtc {
struct intel_crtc_state *config;
+ /* armed event for async flip */
+ struct drm_pending_vblank_event *flip_done_event;
+
/* Access to these should be protected by dev_priv->irq_lock. */
bool cpu_fifo_underrun_disabled;
bool pch_fifo_underrun_disabled;
@@ -1707,9 +1724,13 @@ struct intel_psr {
bool irq_aux_error;
u16 su_w_granularity;
u16 su_y_granularity;
+ bool source_panel_replay_support;
+ bool sink_panel_replay_support;
+ bool panel_replay_enabled;
u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
+ u8 entry_setup_frames;
};
struct intel_dp {
@@ -1808,6 +1829,7 @@ struct intel_dp {
/* Display stream compression testing */
bool force_dsc_en;
int force_dsc_output_format;
+ bool force_dsc_fractional_bpp_en;
int force_dsc_bpc;
bool hobl_failed;
@@ -1992,17 +2014,6 @@ dp_to_lspcon(struct intel_dp *intel_dp)
#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
-#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
- (intel_dp)->psr.source_support)
-
-static inline bool intel_encoder_can_psr(struct intel_encoder *encoder)
-{
- if (!intel_encoder_is_dp(encoder))
- return false;
-
- return CAN_PSR(enc_to_intel_dp(encoder));
-}
-
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
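The new dsc.compressed_bpp_x16 field above stores the link bpp in U6.4 fixed point. A stand-alone sketch of the conversions involved, assuming the to_bpp_x16()/to_bpp_int()/to_bpp_frac() helpers used throughout the series behave like the macros below:

#include <stdio.h>

/* Assumed behaviour of the .4 fixed-point bpp helpers referenced in the patch. */
#define BPP_X16(bpp)			((bpp) << 4)		/* to_bpp_x16() */
#define BPP_INT(bpp_x16)		((bpp_x16) >> 4)	/* to_bpp_int() */
#define BPP_FRAC(bpp_x16)		((bpp_x16) & 0xf)	/* to_bpp_frac() */
#define BPP_INT_ROUNDUP(bpp_x16)	(((bpp_x16) + 0xf) >> 4)

int main(void)
{
	int bpp_x16 = BPP_X16(12) + 8;	/* 12.5 bpp: 12 * 16 + 8 = 200 */

	/* Prints "12.5000 bpp (raw 0xc8)", mirroring the BPP_X16_FMT intent. */
	printf("%d.%04d bpp (raw 0x%x)\n",
	       BPP_INT(bpp_x16), BPP_FRAC(bpp_x16) * 625, bpp_x16);
	printf("rounded up to integer bpp: %d\n", BPP_INT_ROUNDUP(bpp_x16));
	return 0;
}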
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 073b85b576..b70502586a 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -335,77 +335,6 @@ static void disable_event_handler(struct drm_i915_private *i915,
intel_de_write(i915, htp_reg, 0);
}
-static void
-disable_flip_queue_event(struct drm_i915_private *i915,
- i915_reg_t ctl_reg, i915_reg_t htp_reg)
-{
- u32 event_ctl;
- u32 event_htp;
-
- event_ctl = intel_de_read(i915, ctl_reg);
- event_htp = intel_de_read(i915, htp_reg);
- if (event_ctl != (DMC_EVT_CTL_ENABLE |
- DMC_EVT_CTL_RECURRING |
- REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
- DMC_EVT_CTL_TYPE_EDGE_0_1) |
- REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
- DMC_EVT_CTL_EVENT_ID_CLK_MSEC)) ||
- !event_htp) {
- drm_dbg_kms(&i915->drm,
- "Unexpected DMC event configuration (control %08x htp %08x)\n",
- event_ctl, event_htp);
- return;
- }
-
- disable_event_handler(i915, ctl_reg, htp_reg);
-}
-
-static bool
-get_flip_queue_event_regs(struct drm_i915_private *i915, enum intel_dmc_id dmc_id,
- i915_reg_t *ctl_reg, i915_reg_t *htp_reg)
-{
- if (dmc_id == DMC_FW_MAIN) {
- if (DISPLAY_VER(i915) == 12) {
- *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 3);
- *htp_reg = DMC_EVT_HTP(i915, dmc_id, 3);
-
- return true;
- }
- } else if (dmc_id >= DMC_FW_PIPEA && dmc_id <= DMC_FW_PIPED) {
- if (IS_DG2(i915)) {
- *ctl_reg = DMC_EVT_CTL(i915, dmc_id, 2);
- *htp_reg = DMC_EVT_HTP(i915, dmc_id, 2);
-
- return true;
- }
- }
-
- return false;
-}
-
-static void
-disable_all_flip_queue_events(struct drm_i915_private *i915)
-{
- enum intel_dmc_id dmc_id;
-
- /* TODO: check if the following applies to all D13+ platforms. */
- if (!IS_TIGERLAKE(i915))
- return;
-
- for_each_dmc_id(dmc_id) {
- i915_reg_t ctl_reg;
- i915_reg_t htp_reg;
-
- if (!has_dmc_id_fw(i915, dmc_id))
- continue;
-
- if (!get_flip_queue_event_regs(i915, dmc_id, &ctl_reg, &htp_reg))
- continue;
-
- disable_flip_queue_event(i915, ctl_reg, htp_reg);
- }
-}
-
static void disable_all_event_handlers(struct drm_i915_private *i915)
{
enum intel_dmc_id dmc_id;
@@ -503,6 +432,16 @@ static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
return offset >= start && offset < end;
}
+static bool is_dmc_evt_htp_reg(struct drm_i915_private *i915,
+ enum intel_dmc_id dmc_id, i915_reg_t reg)
+{
+ u32 offset = i915_mmio_reg_offset(reg);
+ u32 start = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, 0));
+ u32 end = i915_mmio_reg_offset(DMC_EVT_HTP(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
+
+ return offset >= start && offset < end;
+}
+
static bool disable_dmc_evt(struct drm_i915_private *i915,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
@@ -514,6 +453,16 @@ static bool disable_dmc_evt(struct drm_i915_private *i915,
if (dmc_id != DMC_FW_MAIN)
return true;
+ /* also disable the flip queue event on the main DMC on TGL */
+ if (IS_TIGERLAKE(i915) &&
+ REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_CLK_MSEC)
+ return true;
+
+ /* also disable the HRR event on the main DMC on TGL/ADLS */
+ if ((IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915)) &&
+ REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == DMC_EVT_CTL_EVENT_ID_VBLANK_A)
+ return true;
+
return false;
}
@@ -579,13 +528,6 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
gen9_set_dc_state_debugmask(i915);
- /*
- * Flip queue events need to be disabled before enabling DC5/6.
- * i915 doesn't use the flip queue feature, so disable it already
- * here.
- */
- disable_all_flip_queue_events(i915);
-
pipedmc_clock_gating_wa(i915, false);
}
@@ -781,9 +723,17 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
return 0;
}
+ drm_dbg_kms(&i915->drm, "DMC %d:\n", dmc_id);
for (i = 0; i < mmio_count; i++) {
dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
dmc_info->mmiodata[i] = mmiodata[i];
+
+ drm_dbg_kms(&i915->drm, " mmio[%d]: 0x%x = 0x%x%s%s\n",
+ i, mmioaddr[i], mmiodata[i],
+ is_dmc_evt_ctl_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_CTL)" :
+ is_dmc_evt_htp_reg(i915, dmc_id, dmc_info->mmioaddr[i]) ? " (EVT_HTP)" : "",
+ disable_dmc_evt(i915, dmc_id, dmc_info->mmioaddr[i],
+ dmc_info->mmiodata[i]) ? " (disabling)" : "");
}
dmc_info->mmio_count = mmio_count;
dmc_info->start_mmioaddr = start_mmioaddr;
diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
index cf10094aca..90d0dbb41c 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h
@@ -60,6 +60,7 @@
#define DMC_EVT_CTL_EVENT_ID_MASK REG_GENMASK(15, 8)
#define DMC_EVT_CTL_EVENT_ID_FALSE 0x01
+#define DMC_EVT_CTL_EVENT_ID_VBLANK_A 0x32 /* main DMC */
/* An event handler scheduled to run at a 1 kHz frequency. */
#define DMC_EVT_CTL_EVENT_ID_CLK_MSEC 0xbf
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index ef09356bcb..4e8545126e 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -85,8 +85,8 @@
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
-/* DP DSC FEC Overhead factor = 1/(0.972261) */
-#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
+/* DP DSC FEC Overhead factor in ppm = 1/(0.972261) = 1.028530 */
+#define DP_DSC_FEC_OVERHEAD_FACTOR 1028530
/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
@@ -124,7 +124,31 @@ static void intel_dp_unset_edid(struct intel_dp *intel_dp);
/* Is link rate UHBR and thus 128b/132b? */
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->port_clock >= 1000000;
+ return drm_dp_is_uhbr_rate(crtc_state->port_clock);
+}
+
+/**
+ * intel_dp_link_symbol_size - get the link symbol size for a given link rate
+ * @rate: link rate in 10kbit/s units
+ *
+ * Returns the link symbol size in bits/symbol units depending on the link
+ * rate -> channel coding.
+ */
+int intel_dp_link_symbol_size(int rate)
+{
+ return drm_dp_is_uhbr_rate(rate) ? 32 : 10;
+}
+
+/**
+ * intel_dp_link_symbol_clock - convert link rate to link symbol clock
+ * @rate: link rate in 10kbit/s units
+ *
+ * Returns the link symbol clock frequency in kHz units depending on the
+ * link rate and channel coding.
+ */
+int intel_dp_link_symbol_clock(int rate)
+{
+ return DIV_ROUND_CLOSEST(rate * 10, intel_dp_link_symbol_size(rate));
}
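Worked numbers for the two helpers above, as a stand-alone sketch; drm_dp_is_uhbr_rate() is assumed to simply test for rates of 10 Gbps and up, i.e. >= 1000000 in 10 kbit/s units:

#include <stdio.h>
#include <stdbool.h>

static bool is_uhbr_rate(int rate)
{
	return rate >= 1000000;		/* assumption: UHBR starts at 10 Gbps */
}

static int link_symbol_size(int rate)
{
	return is_uhbr_rate(rate) ? 32 : 10;	/* 128b/132b vs 8b/10b */
}

static int link_symbol_clock(int rate)
{
	/* rate is in 10 kbit/s units, result is the symbol clock in kHz */
	int size = link_symbol_size(rate);

	return (rate * 10 + size / 2) / size;	/* DIV_ROUND_CLOSEST */
}

int main(void)
{
	printf("HBR3:   %d kHz\n", link_symbol_clock(810000));	/* 810000 */
	printf("UHBR10: %d kHz\n", link_symbol_clock(1000000));	/* 312500 */
	return 0;
}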
static void intel_dp_set_default_sink_rates(struct intel_dp *intel_dp)
@@ -331,6 +355,9 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp)
/*
* The required data bandwidth for a mode with given pixel clock and bpp. This
* is the required net bandwidth independent of the data bandwidth efficiency.
+ *
+ * TODO: check if callers of this function should use
+ * intel_dp_effective_data_rate() instead.
*/
int
intel_dp_link_required(int pixel_clock, int bpp)
@@ -339,6 +366,22 @@ intel_dp_link_required(int pixel_clock, int bpp)
return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
+/**
+ * intel_dp_effective_data_rate - Return the pixel data rate accounting for BW allocation overhead
+ * @pixel_clock: pixel clock in kHz
+ * @bpp_x16: bits per pixel .4 fixed point format
+ * @bw_overhead: BW allocation overhead in 1ppm units
+ *
+ * Return the effective pixel data rate in kB/sec units taking into account
+ * the provided SSC, FEC, DSC BW allocation overhead.
+ */
+int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
+ int bw_overhead)
+{
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_clock * bpp_x16, bw_overhead),
+ 1000000 * 16 * 8);
+}
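A quick stand-alone check of the kernel-doc formula above, with illustrative values: a 148.5 MHz pixel clock at 24 bpp yields 445,500 kB/s with no overhead (1,000,000 ppm) and 458,211 kB/s with the 1,028,530 ppm FEC+DSC factor used later in this patch.

#include <stdio.h>
#include <stdint.h>

/* kB/s = pixel_clock[kHz] * bpp_x16 * overhead[ppm] / (1e6 * 16 * 8), rounded up */
static uint64_t effective_data_rate(uint64_t pixel_clock_khz, uint64_t bpp_x16,
				    uint64_t bw_overhead_ppm)
{
	uint64_t num = pixel_clock_khz * bpp_x16 * bw_overhead_ppm;
	uint64_t den = 1000000ULL * 16 * 8;

	return (num + den - 1) / den;	/* DIV_ROUND_UP_ULL */
}

int main(void)
{
	printf("no overhead: %llu kB/s\n",
	       (unsigned long long)effective_data_rate(148500, 24 * 16, 1000000));
	printf("FEC + DSC:   %llu kB/s\n",
	       (unsigned long long)effective_data_rate(148500, 24 * 16, 1028530));
	return 0;
}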
+
/*
* Given a link rate and lanes, get the data bandwidth.
*
@@ -362,29 +405,27 @@ intel_dp_link_required(int pixel_clock, int bpp)
int
intel_dp_max_data_rate(int max_link_rate, int max_lanes)
{
- if (max_link_rate >= 1000000) {
- /*
- * UHBR rates always use 128b/132b channel encoding, and have
- * 97.71% data bandwidth efficiency. Consider max_link_rate the
- * link bit rate in units of 10000 bps.
- */
- int max_link_rate_kbps = max_link_rate * 10;
-
- max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(max_link_rate_kbps, 9671), 10000);
- max_link_rate = max_link_rate_kbps / 8;
- }
+ int ch_coding_efficiency =
+ drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(max_link_rate));
+ int max_link_rate_kbps = max_link_rate * 10;
/*
+ * UHBR rates always use 128b/132b channel encoding, and have
+ * 96.71% data bandwidth efficiency. Consider max_link_rate the
+ * link bit rate in units of 10000 bps.
+ */
+ /*
* Lower than UHBR rates always use 8b/10b channel encoding, and have
* 80% data bandwidth efficiency for SST non-FEC. However, this turns
- * out to be a nop by coincidence, and can be skipped:
+ * out to be a nop by coincidence:
*
* int max_link_rate_kbps = max_link_rate * 10;
- * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
+ * max_link_rate_kbps = DIV_ROUND_DOWN_ULL(max_link_rate_kbps * 8, 10);
* max_link_rate = max_link_rate_kbps / 8;
*/
-
- return max_link_rate * max_lanes;
+ return DIV_ROUND_DOWN_ULL(mul_u32_u32(max_link_rate_kbps * max_lanes,
+ ch_coding_efficiency),
+ 1000000 * 8);
}
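The rewritten intel_dp_max_data_rate() spells out the old "* 10 * efficiency / 8" shortcut. Assuming drm_dp_bw_channel_coding_efficiency() reports about 800,000 ppm for 8b/10b and 967,100 ppm for 128b/132b (matching the 9671/10000 scaling removed above), HBR2 x4 still works out to the old 540000 * 4 = 2,160,000 kB/s:

#include <stdio.h>
#include <stdint.h>

/* Assumed channel coding efficiencies in ppm. */
static int coding_efficiency_ppm(int is_uhbr)
{
	return is_uhbr ? 967100 : 800000;
}

static uint64_t max_data_rate_kBps(uint64_t link_rate_10kbps, int lanes)
{
	uint64_t link_rate_kbps = link_rate_10kbps * 10;
	int eff = coding_efficiency_ppm(link_rate_10kbps >= 1000000);

	/* kbit/s * lanes * efficiency, then down to kB/s, rounded down */
	return link_rate_kbps * lanes * eff / (1000000ULL * 8);
}

int main(void)
{
	printf("HBR2 x4:   %llu kB/s\n",
	       (unsigned long long)max_data_rate_kBps(540000, 4));	/* 2160000 */
	printf("UHBR10 x4: %llu kB/s\n",
	       (unsigned long long)max_data_rate_kBps(1000000, 4));	/* 4835500 */
	return 0;
}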
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
@@ -680,8 +721,22 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
{
- return div_u64(mul_u32_u32(mode_clock, 1000000U),
- DP_DSC_FEC_OVERHEAD_FACTOR);
+ return div_u64(mul_u32_u32(mode_clock, DP_DSC_FEC_OVERHEAD_FACTOR),
+ 1000000U);
+}
+
+int intel_dp_bw_fec_overhead(bool fec_enabled)
+{
+ /*
+ * TODO: Calculate the actual overhead for a given mode.
+ * The hard-coded 1/0.972261 = 1.02853 (2.853%) overhead factor
+ * corresponds (for instance) to the 8b/10b DP FEC 2.4% +
+ * 0.453% DSC overhead. This is enough for a 3840 width mode,
+ * which has a DSC overhead of up to ~0.2%, but may not be
+ * enough for a 1024 width mode where this is ~0.8% (on a 4
+ * lane DP link, with 2 DSC slices and 8 bpp color depth).
+ */
+ return fec_enabled ? DP_DSC_FEC_OVERHEAD_FACTOR : 1000000;
}
static int
@@ -1367,15 +1422,16 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
if (DISPLAY_VER(dev_priv) >= 12)
return true;
- if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A)
+ if (DISPLAY_VER(dev_priv) == 11 && encoder->port != PORT_A &&
+ !intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
return true;
return false;
}
-static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
- const struct intel_connector *connector,
- const struct intel_crtc_state *pipe_config)
+bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config)
{
return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
drm_dp_sink_supports_fec(connector->dp.fec_capability);
@@ -1388,6 +1444,7 @@ static bool intel_dp_supports_dsc(const struct intel_connector *connector,
return false;
return intel_dsc_source_support(crtc_state) &&
+ connector->dp.dsc_decompression_aux &&
drm_dp_sink_supports_dsc(connector->dp.dsc_dpcd);
}
@@ -1721,15 +1778,15 @@ static bool intel_dp_dsc_supports_format(const struct intel_connector *connector
return drm_dp_dsc_sink_supports_format(connector->dp.dsc_dpcd, sink_dsc_format);
}
-static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock,
+static bool is_bw_sufficient_for_dsc_config(u16 compressed_bppx16, u32 link_clock,
u32 lane_count, u32 mode_clock,
enum intel_output_format output_format,
int timeslots)
{
u32 available_bw, required_bw;
- available_bw = (link_clock * lane_count * timeslots) / 8;
- required_bw = compressed_bpp * (intel_dp_mode_to_fec_clock(mode_clock));
+ available_bw = (link_clock * lane_count * timeslots * 16) / 8;
+ required_bw = compressed_bppx16 * (intel_dp_mode_to_fec_clock(mode_clock));
return available_bw > required_bw;
}
@@ -1737,7 +1794,7 @@ static bool is_bw_sufficient_for_dsc_config(u16 compressed_bpp, u32 link_clock,
static int dsc_compute_link_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits,
- u16 compressed_bpp,
+ u16 compressed_bppx16,
int timeslots)
{
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -1752,8 +1809,8 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- if (!is_bw_sufficient_for_dsc_config(compressed_bpp, link_rate, lane_count,
- adjusted_mode->clock,
+ if (!is_bw_sufficient_for_dsc_config(compressed_bppx16, link_rate,
+ lane_count, adjusted_mode->clock,
pipe_config->output_format,
timeslots))
continue;
@@ -1795,7 +1852,7 @@ u16 intel_dp_dsc_max_sink_compressed_bppx16(const struct intel_connector *connec
return 0;
}
-static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
+int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
{
/* From Mandatory bit rate range Support Table 2-157 (DP v2.0) */
switch (pipe_config->output_format) {
@@ -1812,9 +1869,9 @@ static int dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config)
return 0;
}
-static int dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
- struct intel_crtc_state *pipe_config,
- int bpc)
+int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
+ struct intel_crtc_state *pipe_config,
+ int bpc)
{
return intel_dp_dsc_max_sink_compressed_bppx16(connector,
pipe_config, bpc) >> 4;
@@ -1834,7 +1891,7 @@ static int dsc_src_max_compressed_bpp(struct intel_dp *intel_dp)
* Max Compressed bpp for Gen 13+ is 27bpp.
* For earlier platform is 23bpp. (Bspec:49259).
*/
- if (DISPLAY_VER(i915) <= 12)
+ if (DISPLAY_VER(i915) < 13)
return 23;
else
return 27;
@@ -1859,17 +1916,19 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp); i++) {
- if (valid_dsc_bpp[i] < dsc_min_bpp ||
- valid_dsc_bpp[i] > dsc_max_bpp)
+ if (valid_dsc_bpp[i] < dsc_min_bpp)
+ continue;
+ if (valid_dsc_bpp[i] > dsc_max_bpp)
break;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
limits,
- valid_dsc_bpp[i],
+ valid_dsc_bpp[i] << 4,
timeslots);
if (ret == 0) {
- pipe_config->dsc.compressed_bpp = valid_dsc_bpp[i];
+ pipe_config->dsc.compressed_bpp_x16 =
+ to_bpp_x16(valid_dsc_bpp[i]);
return 0;
}
}
@@ -1885,6 +1944,7 @@ icl_dsc_compute_link_config(struct intel_dp *intel_dp,
*/
static int
xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
struct link_config_limits *limits,
int dsc_max_bpp,
@@ -1892,22 +1952,38 @@ xelpd_dsc_compute_link_config(struct intel_dp *intel_dp,
int pipe_bpp,
int timeslots)
{
- u16 compressed_bpp;
+ u8 bppx16_incr = drm_dp_dsc_sink_bpp_incr(connector->dp.dsc_dpcd);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u16 compressed_bppx16;
+ u8 bppx16_step;
int ret;
- /* Compressed BPP should be less than the Input DSC bpp */
- dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
+ if (DISPLAY_VER(i915) < 14 || bppx16_incr <= 1)
+ bppx16_step = 16;
+ else
+ bppx16_step = 16 / bppx16_incr;
- for (compressed_bpp = dsc_max_bpp;
- compressed_bpp >= dsc_min_bpp;
- compressed_bpp--) {
+ /* Compressed BPP should be less than the Input DSC bpp */
+ dsc_max_bpp = min(dsc_max_bpp << 4, (pipe_bpp << 4) - bppx16_step);
+ dsc_min_bpp = dsc_min_bpp << 4;
+
+ for (compressed_bppx16 = dsc_max_bpp;
+ compressed_bppx16 >= dsc_min_bpp;
+ compressed_bppx16 -= bppx16_step) {
+ if (intel_dp->force_dsc_fractional_bpp_en &&
+ !to_bpp_frac(compressed_bppx16))
+ continue;
ret = dsc_compute_link_config(intel_dp,
pipe_config,
limits,
- compressed_bpp,
+ compressed_bppx16,
timeslots);
if (ret == 0) {
- pipe_config->dsc.compressed_bpp = compressed_bpp;
+ pipe_config->dsc.compressed_bpp_x16 = compressed_bppx16;
+ if (intel_dp->force_dsc_fractional_bpp_en &&
+ to_bpp_frac(compressed_bppx16))
+ drm_dbg_kms(&i915->drm, "Forcing DSC fractional bpp\n");
+
return 0;
}
}
@@ -1928,12 +2004,14 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
int dsc_joiner_max_bpp;
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config);
+ dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3);
+ dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ pipe_config,
+ pipe_bpp / 3);
dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
dsc_joiner_max_bpp = get_max_compressed_bpp_with_joiner(i915, adjusted_mode->clock,
@@ -1943,7 +2021,7 @@ static int dsc_compute_compressed_bpp(struct intel_dp *intel_dp,
dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
if (DISPLAY_VER(i915) >= 13)
- return xelpd_dsc_compute_link_config(intel_dp, pipe_config, limits,
+ return xelpd_dsc_compute_link_config(intel_dp, connector, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
return icl_dsc_compute_link_config(intel_dp, pipe_config, limits,
dsc_max_bpp, dsc_min_bpp, pipe_bpp, timeslots);
@@ -2088,19 +2166,22 @@ static int intel_edp_dsc_compute_pipe_bpp(struct intel_dp *intel_dp,
pipe_config->lane_count = limits->max_lane_count;
dsc_src_min_bpp = dsc_src_min_compressed_bpp();
- dsc_sink_min_bpp = dsc_sink_min_compressed_bpp(pipe_config);
+ dsc_sink_min_bpp = intel_dp_dsc_sink_min_compressed_bpp(pipe_config);
dsc_min_bpp = max(dsc_src_min_bpp, dsc_sink_min_bpp);
dsc_min_bpp = max(dsc_min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
dsc_src_max_bpp = dsc_src_max_compressed_bpp(intel_dp);
- dsc_sink_max_bpp = dsc_sink_max_compressed_bpp(connector, pipe_config, pipe_bpp / 3);
+ dsc_sink_max_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ pipe_config,
+ pipe_bpp / 3);
dsc_max_bpp = dsc_sink_max_bpp ? min(dsc_sink_max_bpp, dsc_src_max_bpp) : dsc_src_max_bpp;
dsc_max_bpp = min(dsc_max_bpp, to_bpp_int(limits->link.max_bpp_x16));
/* Compressed BPP should be less than the Input DSC bpp */
dsc_max_bpp = min(dsc_max_bpp, pipe_bpp - 1);
- pipe_config->dsc.compressed_bpp = max(dsc_min_bpp, dsc_max_bpp);
+ pipe_config->dsc.compressed_bpp_x16 =
+ to_bpp_x16(max(dsc_min_bpp, dsc_max_bpp));
pipe_config->pipe_bpp = pipe_bpp;
@@ -2122,8 +2203,9 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
&pipe_config->hw.adjusted_mode;
int ret;
- pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
- intel_dp_supports_fec(intel_dp, connector, pipe_config);
+ pipe_config->fec_enable = pipe_config->fec_enable ||
+ (!intel_dp_is_edp(intel_dp) &&
+ intel_dp_supports_fec(intel_dp, connector, pipe_config));
if (!intel_dp_supports_dsc(connector, pipe_config))
return -EINVAL;
@@ -2188,18 +2270,18 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
ret = intel_dp_dsc_compute_params(connector, pipe_config);
if (ret < 0) {
drm_dbg_kms(&dev_priv->drm,
- "Cannot compute valid DSC parameters for Input Bpp = %d "
- "Compressed BPP = %d\n",
+ "Cannot compute valid DSC parameters for Input Bpp = %d"
+ "Compressed BPP = " BPP_X16_FMT "\n",
pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
return ret;
}
pipe_config->dsc.compression_enable = true;
drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
- "Compressed Bpp = %d Slice Count = %d\n",
+ "Compressed Bpp = " BPP_X16_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp,
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16),
pipe_config->dsc.slice_count);
return 0;
@@ -2314,6 +2396,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ const struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2322,6 +2406,10 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
bool dsc_needed;
int ret = 0;
+ if (pipe_config->fec_enable &&
+ !intel_dp_supports_fec(intel_dp, connector, pipe_config))
+ return -EINVAL;
+
if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
adjusted_mode->crtc_clock))
pipe_config->bigjoiner_pipes = GENMASK(crtc->pipe + 1, crtc->pipe);
@@ -2369,15 +2457,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
if (pipe_config->dsc.compression_enable) {
drm_dbg_kms(&i915->drm,
- "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
+ "DP lane count %d clock %d Input bpp %d Compressed bpp " BPP_X16_FMT "\n",
pipe_config->lane_count, pipe_config->port_clock,
pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
+ BPP_X16_ARGS(pipe_config->dsc.compressed_bpp_x16));
drm_dbg_kms(&i915->drm,
"DP link rate required %i available %i\n",
intel_dp_link_required(adjusted_mode->crtc_clock,
- pipe_config->dsc.compressed_bpp),
+ to_bpp_int_roundup(pipe_config->dsc.compressed_bpp_x16)),
intel_dp_max_data_rate(pipe_config->port_clock,
pipe_config->lane_count));
} else {
@@ -2446,12 +2534,22 @@ static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- /*
- * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
- * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
- * Colorimetry Format indication.
- */
- vsc->revision = 0x5;
+ if (crtc_state->has_panel_replay) {
+ /*
+ * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
+ * VSC SDP supporting 3D stereo, Panel Replay, and Pixel
+ * Encoding/Colorimetry Format indication.
+ */
+ vsc->revision = 0x7;
+ } else {
+ /*
+ * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
+ * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
+ * Colorimetry Format indication.
+ */
+ vsc->revision = 0x5;
+ }
+
vsc->length = 0x13;
/* DP 1.4a spec, Table 2-120 */
@@ -2560,6 +2658,21 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
vsc->revision = 0x4;
vsc->length = 0xe;
}
+ } else if (crtc_state->has_panel_replay) {
+ if (intel_dp->psr.colorimetry_support &&
+ intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
+ /* [Panel Replay with colorimetry info] */
+ intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
+ vsc);
+ } else {
+ /*
+ * [Panel Replay without colorimetry info]
+ * Prepare VSC Header for SU as per DP 2.0 spec, Table 2-223
+ * VSC SDP supporting 3D stereo + Panel Replay.
+ */
+ vsc->revision = 0x6;
+ vsc->length = 0x10;
+ }
} else {
/*
* [PSR1]
@@ -2636,14 +2749,18 @@ static bool can_enable_drrs(struct intel_connector *connector,
static void
intel_dp_drrs_compute_config(struct intel_connector *connector,
struct intel_crtc_state *pipe_config,
- int link_bpp)
+ int link_bpp_x16)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
const struct drm_display_mode *downclock_mode =
intel_panel_downclock_mode(connector, &pipe_config->hw.adjusted_mode);
int pixel_clock;
- if (has_seamless_m_n(connector))
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that when updating M/N live.
+ */
+ if (has_seamless_m_n(connector) && !pipe_config->bigjoiner_pipes)
pipe_config->update_m_n = true;
if (!can_enable_drrs(connector, pipe_config, downclock_mode)) {
@@ -2661,9 +2778,10 @@ intel_dp_drrs_compute_config(struct intel_connector *connector,
if (pipe_config->splitter.enable)
pixel_clock /= pipe_config->splitter.link_count;
- intel_link_compute_m_n(link_bpp, pipe_config->lane_count, pixel_clock,
- pipe_config->port_clock, &pipe_config->dp_m2_n2,
- pipe_config->fec_enable);
+ intel_link_compute_m_n(link_bpp_x16, pipe_config->lane_count, pixel_clock,
+ pipe_config->port_clock,
+ intel_dp_bw_fec_overhead(pipe_config->fec_enable),
+ &pipe_config->dp_m2_n2);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2739,19 +2857,12 @@ intel_dp_audio_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- struct drm_connector *connector = conn_state->connector;
-
pipe_config->has_audio =
intel_dp_has_audio(encoder, pipe_config, conn_state) &&
intel_audio_compute_config(encoder, pipe_config, conn_state);
pipe_config->sdp_split_enable = pipe_config->has_audio &&
intel_dp_is_uhbr(pipe_config);
-
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s] SDP split enable: %s\n",
- connector->base.id, connector->name,
- str_yes_no(pipe_config->sdp_split_enable));
}
int
@@ -2764,7 +2875,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
const struct drm_display_mode *fixed_mode;
struct intel_connector *connector = intel_dp->attached_connector;
- int ret = 0, link_bpp;
+ int ret = 0, link_bpp_x16;
if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && encoder->port != PORT_A)
pipe_config->has_pch_encoder = true;
@@ -2813,10 +2924,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
drm_dp_enhanced_frame_cap(intel_dp->dpcd);
if (pipe_config->dsc.compression_enable)
- link_bpp = pipe_config->dsc.compressed_bpp;
+ link_bpp_x16 = pipe_config->dsc.compressed_bpp_x16;
else
- link_bpp = intel_dp_output_bpp(pipe_config->output_format,
- pipe_config->pipe_bpp);
+ link_bpp_x16 = to_bpp_x16(intel_dp_output_bpp(pipe_config->output_format,
+ pipe_config->pipe_bpp));
if (intel_dp->mso_link_count) {
int n = intel_dp->mso_link_count;
@@ -2840,12 +2951,12 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
- intel_link_compute_m_n(link_bpp,
+ intel_link_compute_m_n(link_bpp_x16,
pipe_config->lane_count,
adjusted_mode->crtc_clock,
pipe_config->port_clock,
- &pipe_config->dp_m_n,
- pipe_config->fec_enable);
+ intel_dp_bw_fec_overhead(pipe_config->fec_enable),
+ &pipe_config->dp_m_n);
/* FIXME: abstract this better */
if (pipe_config->splitter.enable)
@@ -2856,7 +2967,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
intel_vrr_compute_config(pipe_config, conn_state);
intel_psr_compute_config(intel_dp, pipe_config, conn_state);
- intel_dp_drrs_compute_config(connector, pipe_config, link_bpp);
+ intel_dp_drrs_compute_config(connector, pipe_config, link_bpp_x16);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
@@ -2924,25 +3035,180 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
-void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool enable)
+static int
+write_dsc_decompression_flag(struct drm_dp_aux *aux, u8 flag, bool set)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- int ret;
+ int err;
+ u8 val;
- if (!crtc_state->dsc.compression_enable)
- return;
+ err = drm_dp_dpcd_readb(aux, DP_DSC_ENABLE, &val);
+ if (err < 0)
+ return err;
- ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
- enable ? DP_DECOMPRESSION_EN : 0);
- if (ret < 0)
+ if (set)
+ val |= flag;
+ else
+ val &= ~flag;
+
+ return drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE, val);
+}
+
+static void
+intel_dp_sink_set_dsc_decompression(struct intel_connector *connector,
+ bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ if (write_dsc_decompression_flag(connector->dp.dsc_decompression_aux,
+ DP_DECOMPRESSION_EN, enable) < 0)
drm_dbg_kms(&i915->drm,
"Failed to %s sink decompression state\n",
str_enable_disable(enable));
}
static void
+intel_dp_sink_set_dsc_passthrough(const struct intel_connector *connector,
+ bool enable)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_dp_aux *aux = connector->port ?
+ connector->port->passthrough_aux : NULL;
+
+ if (!aux)
+ return;
+
+ if (write_dsc_decompression_flag(aux,
+ DP_DSC_PASSTHROUGH_EN, enable) < 0)
+ drm_dbg_kms(&i915->drm,
+ "Failed to %s sink compression passthrough state\n",
+ str_enable_disable(enable));
+}
+
+static int intel_dp_dsc_aux_ref_count(struct intel_atomic_state *state,
+ const struct intel_connector *connector,
+ bool for_get_ref)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct drm_connector *_connector_iter;
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ int ref_count = 0;
+ int i;
+
+ /*
+ * On SST the decompression AUX device won't be shared; each connector
+ * uses its own AUX targeting the sink device for this.
+ */
+ if (!connector->mst_port)
+ return connector->dp.dsc_decompression_enabled ? 1 : 0;
+
+ for_each_oldnew_connector_in_state(&state->base, _connector_iter,
+ old_conn_state, new_conn_state, i) {
+ const struct intel_connector *
+ connector_iter = to_intel_connector(_connector_iter);
+
+ if (connector_iter->mst_port != connector->mst_port)
+ continue;
+
+ if (!connector_iter->dp.dsc_decompression_enabled)
+ continue;
+
+ drm_WARN_ON(&i915->drm,
+ (for_get_ref && !new_conn_state->crtc) ||
+ (!for_get_ref && !old_conn_state->crtc));
+
+ if (connector_iter->dp.dsc_decompression_aux ==
+ connector->dp.dsc_decompression_aux)
+ ref_count++;
+ }
+
+ return ref_count;
+}
+
+static bool intel_dp_dsc_aux_get_ref(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ bool ret = intel_dp_dsc_aux_ref_count(state, connector, true) == 0;
+
+ connector->dp.dsc_decompression_enabled = true;
+
+ return ret;
+}
+
+static bool intel_dp_dsc_aux_put_ref(struct intel_atomic_state *state,
+ struct intel_connector *connector)
+{
+ connector->dp.dsc_decompression_enabled = false;
+
+ return intel_dp_dsc_aux_ref_count(state, connector, false) == 0;
+}
+
+/**
+ * intel_dp_sink_enable_decompression - Enable DSC decompression in sink/last branch device
+ * @state: atomic state
+ * @connector: connector to enable the decompression for
+ * @new_crtc_state: new state for the CRTC driving @connector
+ *
+ * Enable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
+ * register of the appropriate sink/branch device. On SST this is always the
+ * sink device, whereas on MST, based on each device's DSC capabilities, it's
+ * either the last branch device (enabling decompression in it) or both the
+ * last branch device (enabling passthrough in it) and the sink device
+ * (enabling decompression in it).
+ */
+void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (!new_crtc_state->dsc.compression_enable)
+ return;
+
+ if (drm_WARN_ON(&i915->drm,
+ !connector->dp.dsc_decompression_aux ||
+ connector->dp.dsc_decompression_enabled))
+ return;
+
+ if (!intel_dp_dsc_aux_get_ref(state, connector))
+ return;
+
+ intel_dp_sink_set_dsc_passthrough(connector, true);
+ intel_dp_sink_set_dsc_decompression(connector, true);
+}
+
+/**
+ * intel_dp_sink_disable_decompression - Disable DSC decompression in sink/last branch device
+ * @state: atomic state
+ * @connector: connector to disable the decompression for
+ * @old_crtc_state: old state for the CRTC driving @connector
+ *
+ * Disable the DSC decompression if required in the %DP_DSC_ENABLE DPCD
+ * register of the appropriate sink/branch device, corresponding to the
+ * sequence in intel_dp_sink_enable_decompression().
+ */
+void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+
+ if (!old_crtc_state->dsc.compression_enable)
+ return;
+
+ if (drm_WARN_ON(&i915->drm,
+ !connector->dp.dsc_decompression_aux ||
+ !connector->dp.dsc_decompression_enabled))
+ return;
+
+ if (!intel_dp_dsc_aux_put_ref(state, connector))
+ return;
+
+ intel_dp_sink_set_dsc_decompression(connector, false);
+ intel_dp_sink_set_dsc_passthrough(connector, false);
+}
+
+static void
intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
@@ -3778,7 +4044,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- return i915->params.enable_dp_mst &&
+ return i915->display.params.enable_dp_mst &&
intel_dp_mst_source_support(intel_dp) &&
drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}
@@ -3796,13 +4062,13 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
encoder->base.base.id, encoder->base.name,
str_yes_no(intel_dp_mst_source_support(intel_dp)),
str_yes_no(sink_can_mst),
- str_yes_no(i915->params.enable_dp_mst));
+ str_yes_no(i915->display.params.enable_dp_mst));
if (!intel_dp_mst_source_support(intel_dp))
return;
intel_dp->is_mst = sink_can_mst &&
- i915->params.enable_dp_mst;
+ i915->display.params.enable_dp_mst;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
@@ -3872,11 +4138,16 @@ static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */
+ if (vsc->revision == 0x6) {
+ sdp->db[0] = 1;
+ sdp->db[3] = 1;
+ }
+
/*
- * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
- * per DP 1.4a spec.
+ * Revision 0x5 and revision 0x7 support Pixel Encoding/Colorimetry
+ * Format as per the DP 1.4a and DP 2.0 specs, respectively.
*/
- if (vsc->revision != 0x5)
+ if (!(vsc->revision == 0x5 || vsc->revision == 0x7))
goto out;
/* VSC SDP Payload for DB16 through DB18 */
@@ -4056,7 +4327,10 @@ void intel_dp_set_infoframes(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
u32 val = intel_de_read(dev_priv, reg) & ~dip_enable;
- /* TODO: Add DSC case (DIP_ENABLE_PPS) */
+ /* TODO: Sanitize DSC enabling wrt. intel_dsc_dp_pps_write(). */
+ if (!enable && HAS_DSC(dev_priv))
+ val &= ~VDIP_ENABLE_PPS;
+
/* When PSR is enabled, this routine doesn't disable VSC DIP */
if (!crtc_state->has_psr)
val &= ~VIDEO_DIP_ENABLE_VSC_HSW;
@@ -5416,6 +5690,7 @@ intel_dp_detect(struct drm_connector *connector,
if (status == connector_status_disconnected) {
memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
memset(intel_connector->dp.dsc_dpcd, 0, sizeof(intel_connector->dp.dsc_dpcd));
+ intel_dp->psr.sink_panel_replay_support = false;
if (intel_dp->is_mst) {
drm_dbg_kms(&dev_priv->drm,
@@ -5430,6 +5705,9 @@ intel_dp_detect(struct drm_connector *connector,
goto out;
}
+ if (!intel_dp_is_edp(intel_dp))
+ intel_psr_init_dpcd(intel_dp);
+
intel_dp_detect_dsc_caps(intel_dp, intel_connector);
intel_dp_configure_mst(intel_dp);
@@ -5590,6 +5868,19 @@ intel_dp_connector_unregister(struct drm_connector *connector)
intel_connector_unregister(connector);
}
+void intel_dp_connector_sync_state(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+
+ if (crtc_state && crtc_state->dsc.compression_enable) {
+ drm_WARN_ON(&i915->drm, !connector->dp.dsc_decompression_aux);
+ connector->dp.dsc_decompression_enabled = true;
+ } else {
+ connector->dp.dsc_decompression_enabled = false;
+ }
+}
+
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
@@ -6238,6 +6529,7 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
+ intel_connector->sync_state = intel_dp_connector_sync_state;
if (!intel_edp_init_connector(intel_dp, intel_connector)) {
intel_dp_aux_fini(intel_dp);
@@ -6261,16 +6553,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
- /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
- * 0xd. Failure to do so will result in spurious interrupts being
- * generated on the port when a cable is not attached.
- */
- if (IS_G45(dev_priv)) {
- u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
- intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
- (temp & ~0xf) | 0xd);
- }
-
intel_dp->frl.is_trained = false;
intel_dp->frl.trained_rate_gbps = 0;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 484aea215a..375d0677cd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -45,6 +45,8 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
int intel_dp_min_bpp(enum intel_output_format output_format);
bool intel_dp_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector);
+void intel_dp_connector_sync_state(struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state);
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count);
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
@@ -57,9 +59,12 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode);
void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- bool enable);
+void intel_dp_sink_enable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *new_crtc_state);
+void intel_dp_sink_disable_decompression(struct intel_atomic_state *state,
+ struct intel_connector *connector,
+ const struct intel_crtc_state *old_crtc_state);
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder);
void intel_dp_encoder_shutdown(struct intel_encoder *intel_encoder);
void intel_dp_encoder_flush_work(struct drm_encoder *encoder);
@@ -78,6 +83,8 @@ void intel_dp_audio_compute_config(struct intel_encoder *encoder,
bool intel_dp_has_hdmi_sink(struct intel_dp *intel_dp);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
+int intel_dp_link_symbol_size(int rate);
+int intel_dp_link_symbol_clock(int rate);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
bool long_hpd);
@@ -98,6 +105,8 @@ bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
+int intel_dp_effective_data_rate(int pixel_clock, int bpp_x16,
+ int bw_overhead);
int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
@@ -125,6 +134,10 @@ u16 intel_dp_dsc_get_max_compressed_bpp(struct drm_i915_private *i915,
enum intel_output_format output_format,
u32 pipe_bpp,
u32 timeslots);
+int intel_dp_dsc_sink_min_compressed_bpp(struct intel_crtc_state *pipe_config);
+int intel_dp_dsc_sink_max_compressed_bpp(const struct intel_connector *connector,
+ struct intel_crtc_state *pipe_config,
+ int bpc);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
bool bigjoiner);
@@ -136,7 +149,12 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
return ~((1 << lane_count) - 1) & 0xf;
}
+bool intel_dp_supports_fec(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
+ const struct intel_crtc_state *pipe_config);
u32 intel_dp_mode_to_fec_clock(u32 mode_clock);
+int intel_dp_bw_fec_overhead(bool fec_enabled);
+
u32 intel_dp_dsc_nearest_valid_bpp(struct drm_i915_private *i915, u32 bpp, u32 pipe_bpp);
void intel_ddi_update_pipe(struct intel_atomic_state *state,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 4431b6290c..2e2af71bcd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -74,7 +74,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
if (index)
return 0;
@@ -83,12 +83,12 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2000 and use that
*/
- return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(i915)->rawclk_freq, 2000);
}
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 freq;
@@ -101,18 +101,18 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- freq = dev_priv->display.cdclk.hw.cdclk;
+ freq = i915->display.cdclk.hw.cdclk;
else
- freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ freq = RUNTIME_INFO(i915)->rawclk_freq;
return DIV_ROUND_CLOSEST(freq, 2000);
}
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
+ if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
/* Workaround for non-ULT HSW */
switch (index) {
case 0: return 63;
@@ -165,12 +165,11 @@ static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
u32 aux_clock_divider)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv =
- to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 timeout;
/* Max timeout value on G4x-BDW: 1.6ms */
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(i915))
timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
else
timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
@@ -229,8 +228,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u32 aux_send_ctl_flags)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
bool is_tc_port = intel_phy_is_tc(i915, phy);
i915_reg_t ch_ctl, ch_data[5];
@@ -531,9 +529,40 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
}
+static i915_reg_t vlv_aux_ctl_reg(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return VLV_DP_AUX_CH_CTL(aux_ch);
+ default:
+ MISSING_CASE(aux_ch);
+ return VLV_DP_AUX_CH_CTL(AUX_CH_B);
+ }
+}
+
+static i915_reg_t vlv_aux_data_reg(struct intel_dp *intel_dp, int index)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ enum aux_ch aux_ch = dig_port->aux_ch;
+
+ switch (aux_ch) {
+ case AUX_CH_B:
+ case AUX_CH_C:
+ case AUX_CH_D:
+ return VLV_DP_AUX_CH_DATA(aux_ch, index);
+ default:
+ MISSING_CASE(aux_ch);
+ return VLV_DP_AUX_CH_DATA(AUX_CH_B, index);
+ }
+}
+
static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -550,7 +579,6 @@ static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -567,7 +595,6 @@ static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -586,7 +613,6 @@ static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -605,7 +631,6 @@ static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -625,7 +650,6 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -645,7 +669,6 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -668,7 +691,6 @@ static i915_reg_t tgl_aux_ctl_reg(struct intel_dp *intel_dp)
static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -691,7 +713,7 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -702,16 +724,16 @@ static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_CTL(dev_priv, aux_ch);
+ return XELPDP_DP_AUX_CH_CTL(i915, aux_ch);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_CTL(dev_priv, AUX_CH_A);
+ return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A);
}
}
static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -722,10 +744,10 @@ static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_DATA(dev_priv, aux_ch, index);
+ return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_DATA(dev_priv, AUX_CH_A, index);
+ return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index);
}
}
@@ -739,49 +761,52 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
char buf[AUX_CH_NAME_BUFSIZE];
- if (DISPLAY_VER(dev_priv) >= 14) {
+ if (DISPLAY_VER(i915) >= 14) {
intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(i915) >= 12) {
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (DISPLAY_VER(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(i915) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
+ } else if (HAS_PCH_SPLIT(i915)) {
intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
+ } else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
+ intel_dp->aux_ch_ctl_reg = vlv_aux_ctl_reg;
+ intel_dp->aux_ch_data_reg = vlv_aux_data_reg;
} else {
intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
}
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(i915) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
- else if (HAS_PCH_SPLIT(dev_priv))
+ else if (HAS_PCH_SPLIT(i915))
intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(i915) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- intel_dp->aux.drm_dev = &dev_priv->drm;
+ intel_dp->aux.drm_dev = &i915->drm;
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
- aux_ch_name(dev_priv, buf, sizeof(buf), aux_ch),
+ aux_ch_name(i915, buf, sizeof(buf), aux_ch),
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 26ea7e9f1b..4f58efdc68 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -146,7 +146,7 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
* HDR static metadata we need to start maintaining table of
* ranges for such panels.
*/
- if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+ if (i915->display.params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
!(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
BIT(HDMI_STATIC_METADATA_TYPE1))) {
drm_info(&i915->drm,
@@ -489,7 +489,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
/* Check the VBT and user's module parameters to figure out which
* interfaces to probe
*/
- switch (i915->params.enable_dpcd_backlight) {
+ switch (i915->display.params.enable_dpcd_backlight) {
case INTEL_DP_AUX_BACKLIGHT_OFF:
return -ENODEV;
case INTEL_DP_AUX_BACKLIGHT_AUTO:
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
index 34f6e0a48e..e642445364 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_regs.h
@@ -21,13 +21,14 @@
#define __xe2lpd_aux_ch_idx(aux_ch) \
(aux_ch >= AUX_CH_USBC1 ? aux_ch : AUX_CH_USBC4 + 1 + (aux_ch) - AUX_CH_A)
-/* TODO: Remove implicit dev_priv */
-#define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010)
-#define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110)
+#define _DPA_AUX_CH_CTL 0x64010
+#define _DPB_AUX_CH_CTL 0x64110
#define _XELPDP_USBC1_AUX_CH_CTL 0x16f210
#define _XELPDP_USBC2_AUX_CH_CTL 0x16f410
#define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, \
_DPB_AUX_CH_CTL)
+#define VLV_DP_AUX_CH_CTL(aux_ch) _MMIO(VLV_DISPLAY_BASE + \
+ _PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL))
#define _XELPDP_DP_AUX_CH_CTL(aux_ch) \
_MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \
_DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL, \
@@ -69,13 +70,14 @@
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK REG_GENMASK(4, 0) /* skl+ */
#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) REG_FIELD_PREP(DP_AUX_CH_CTL_SYNC_PULSE_SKL_MASK, (c) - 1)
-/* TODO: Remove implicit dev_priv */
-#define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014)
-#define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114)
+#define _DPA_AUX_CH_DATA1 0x64014
+#define _DPB_AUX_CH_DATA1 0x64114
#define _XELPDP_USBC1_AUX_CH_DATA1 0x16f214
#define _XELPDP_USBC2_AUX_CH_DATA1 0x16f414
#define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, \
_DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
+#define VLV_DP_AUX_CH_DATA(aux_ch, i) _MMIO(VLV_DISPLAY_BASE + _PORT(aux_ch, _DPA_AUX_CH_DATA1, \
+ _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */
#define _XELPDP_DP_AUX_CH_DATA(aux_ch, i) \
_MMIO(_PICK_EVEN_2RANGES(aux_ch, AUX_CH_USBC1, \
_DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1, \
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index 3a595cd433..8538d1ce2f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -330,23 +330,13 @@ static const struct hdcp2_dp_msg_data hdcp2_dp_msg_data[] = {
0, 0 },
};
-static struct drm_dp_aux *
-intel_dp_hdcp_get_aux(struct intel_connector *connector)
-{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
-
- if (intel_encoder_is_mst(connector->encoder))
- return &connector->port->aux;
- else
- return &dig_port->dp.aux;
-}
-
static int
intel_dp_hdcp2_read_rx_status(struct intel_connector *connector,
u8 *rx_status)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
ret = drm_dp_dpcd_read(aux,
@@ -399,7 +389,9 @@ intel_dp_hdcp2_wait_for_msg(struct intel_connector *connector,
const struct hdcp2_dp_msg_data *hdcp2_msg_data)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
u8 msg_id = hdcp2_msg_data->msg_id;
int ret, timeout;
bool msg_ready = false;
@@ -454,8 +446,9 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
- struct drm_dp_aux *aux;
hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
if (!hdcp2_msg_data)
@@ -463,8 +456,6 @@ int intel_dp_hdcp2_write_msg(struct intel_connector *connector,
offset = hdcp2_msg_data->offset;
- aux = intel_dp_hdcp_get_aux(connector);
-
/* No msg_id in DP HDCP2.2 msgs */
bytes_to_write = size - 1;
byte++;
@@ -490,7 +481,8 @@ static
ssize_t get_receiver_id_list_rx_info(struct intel_connector *connector,
u32 *dev_cnt, u8 *byte)
{
- struct drm_dp_aux *aux = intel_dp_hdcp_get_aux(connector);
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
ssize_t ret;
u8 *rx_info = byte;
@@ -515,8 +507,9 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_dp_aux *aux;
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
@@ -530,8 +523,6 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
return -EINVAL;
offset = hdcp2_msg_data->offset;
- aux = intel_dp_hdcp_get_aux(connector);
-
ret = intel_dp_hdcp2_wait_for_msg(connector, hdcp2_msg_data);
if (ret < 0)
return ret;
@@ -561,13 +552,8 @@ int intel_dp_hdcp2_read_msg(struct intel_connector *connector,
/* Entire msg read timeout since initiate of msg read */
if (bytes_to_recv == size - 1 && hdcp2_msg_data->msg_read_timeout > 0) {
- if (intel_encoder_is_mst(connector->encoder))
- msg_end = ktime_add_ms(ktime_get_raw(),
- hdcp2_msg_data->msg_read_timeout *
- connector->port->parent->num_ports);
- else
- msg_end = ktime_add_ms(ktime_get_raw(),
- hdcp2_msg_data->msg_read_timeout);
+ msg_end = ktime_add_ms(ktime_get_raw(),
+ hdcp2_msg_data->msg_read_timeout);
}
ret = drm_dp_dpcd_read(aux, offset,
@@ -651,12 +637,11 @@ static
int intel_dp_hdcp2_capable(struct intel_connector *connector,
bool *capable)
{
- struct drm_dp_aux *aux;
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct drm_dp_aux *aux = &dig_port->dp.aux;
u8 rx_caps[3];
int ret;
- aux = intel_dp_hdcp_get_aux(connector);
-
*capable = false;
ret = drm_dp_dpcd_read(aux,
DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 03ac281766..2151e916cc 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
+#include <drm/drm_fixed.h>
#include <drm/drm_probe_helper.h>
#include "i915_drv.h"
@@ -43,6 +44,9 @@
#include "intel_dpio_phy.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
+#include "intel_link_bw.h"
+#include "intel_psr.h"
+#include "intel_vdsc.h"
#include "skl_scaler.h"
static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp,
@@ -50,7 +54,7 @@ static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp
struct intel_crtc_state *crtc_state,
bool dsc)
{
- if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) <= 13 && dsc) {
+ if (intel_dp_is_uhbr(crtc_state) && DISPLAY_VER(i915) < 14 && dsc) {
int output_bpp = bpp;
/* DisplayPort 2 128b/132b, bits per lane is always 32 */
int symbol_clock = crtc_state->port_clock / 32;
@@ -66,6 +70,73 @@ static int intel_dp_mst_check_constraints(struct drm_i915_private *i915, int bpp
return 0;
}
+static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
+ const struct intel_connector *connector,
+ bool ssc, bool dsc, int bpp_x16)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
+ int dsc_slice_count = 0;
+ int overhead;
+
+ flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
+ flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
+ flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
+
+ if (dsc) {
+ flags |= DRM_DP_BW_OVERHEAD_DSC;
+ /* TODO: add support for bigjoiner */
+ dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
+ adjusted_mode->clock,
+ adjusted_mode->hdisplay,
+ false);
+ }
+
+ overhead = drm_dp_bw_overhead(crtc_state->lane_count,
+ adjusted_mode->hdisplay,
+ dsc_slice_count,
+ bpp_x16,
+ flags);
+
+ /*
+ * TODO: clarify whether the minimum implied by the fixed FEC overhead
+ * in the bspec audio programming sequence is also required here.
+ */
+ return max(overhead, intel_dp_bw_fec_overhead(crtc_state->fec_enable));
+}
+
+static void intel_dp_mst_compute_m_n(const struct intel_crtc_state *crtc_state,
+ const struct intel_connector *connector,
+ int overhead,
+ int bpp_x16,
+ struct intel_link_m_n *m_n)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ /* TODO: Check WA 14013163432 to set data M/N for full BW utilization. */
+ intel_link_compute_m_n(bpp_x16, crtc_state->lane_count,
+ adjusted_mode->crtc_clock,
+ crtc_state->port_clock,
+ overhead,
+ m_n);
+
+ m_n->tu = DIV_ROUND_UP_ULL(mul_u32_u32(m_n->data_m, 64), m_n->data_n);
+}
+
+static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
+{
+ int effective_data_rate =
+ intel_dp_effective_data_rate(pixel_clock, bpp_x16, bw_overhead);
+
+ /*
+ * TODO: Use drm_dp_calc_pbn_mode() instead, once it's converted
+ * to calculate PBN with the BW overhead passed to it.
+ */
+ return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
+}
+
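For reference, the arithmetic in intel_dp_mst_calc_pbn() above converts a data rate into PBN units, where 1 PBN corresponds to 54/64 MB/s, hence the * 64 / (54 * 1000) step with the rate expressed in kB/s. A standalone sketch of the calculation, under the assumption that pixel_clock is in kHz, bpp_x16 is bpp * 16 and bw_overhead is in millionths (1000000 meaning no overhead); the local effective_data_rate() helper is only a stand-in for intel_dp_effective_data_rate(), whose body is not part of this hunk:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Assumed units: pixel_clock in kHz, bpp_x16 = bpp * 16, bw_overhead in
 * 1/1000000 units (1000000 == no overhead). Result is a rate in kB/s.
 */
static long long effective_data_rate(long long pixel_clock, int bpp_x16,
				     long long bw_overhead)
{
	return DIV_ROUND_UP(pixel_clock * bpp_x16 * bw_overhead,
			    1000000LL * 16 * 8);
}

int main(void)
{
	/* 1080p60-ish stream: 148500 kHz, 24 bpp, no extra overhead. */
	long long rate = effective_data_rate(148500, 24 * 16, 1000000);
	/* 1 PBN = 54/64 MB/s, so PBN = rate_kBps * 64 / (54 * 1000). */
	long long pbn = DIV_ROUND_UP(rate * 64, 54 * 1000);

	printf("rate=%lld kB/s pbn=%lld\n", rate, pbn); /* 445500, 528 */
	return 0;
}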
static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int max_bpp,
@@ -94,19 +165,67 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
crtc_state->lane_count = limits->max_lane_count;
crtc_state->port_clock = limits->max_rate;
+ if (dsc) {
+ if (!intel_dp_supports_fec(intel_dp, connector, crtc_state))
+ return -EINVAL;
+
+ crtc_state->fec_enable = !intel_dp_is_uhbr(crtc_state);
+ }
+
mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
crtc_state->port_clock,
crtc_state->lane_count);
+ drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
+ min_bpp, max_bpp);
+
for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
+ int local_bw_overhead;
+ int remote_bw_overhead;
+ int link_bpp_x16;
+ int remote_tu;
+
drm_dbg_kms(&i915->drm, "Trying bpp %d\n", bpp);
ret = intel_dp_mst_check_constraints(i915, bpp, adjusted_mode, crtc_state, dsc);
if (ret)
continue;
- crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock,
- bpp << 4);
+ link_bpp_x16 = to_bpp_x16(dsc ? bpp :
+ intel_dp_output_bpp(crtc_state->output_format, bpp));
+
+ local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
+ false, dsc, link_bpp_x16);
+ remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
+ true, dsc, link_bpp_x16);
+
+ intel_dp_mst_compute_m_n(crtc_state, connector,
+ local_bw_overhead,
+ link_bpp_x16,
+ &crtc_state->dp_m_n);
+
+ /*
+ * The TU size programmed to the HW determines which slots in
+ * an MTP frame are used for this stream, which needs to match
+ * the payload size programmed to the first downstream branch
+ * device's payload table.
+ *
+ * Note that at the moment the payload's PBN value the DRM core sends via
+ * the ALLOCATE_PAYLOAD side-band message matches the payload
+ * size (which it calculates from the PBN value) it programs
+ * to the first branch device's payload table. The allocation
+ * in the payload table could be reduced though (to
+ * crtc_state->dp_m_n.tu), provided that the driver doesn't
+ * enable SSC on the corresponding link.
+ */
+ crtc_state->pbn = intel_dp_mst_calc_pbn(adjusted_mode->crtc_clock,
+ link_bpp_x16,
+ remote_bw_overhead);
+
+ remote_tu = DIV_ROUND_UP(dfixed_const(crtc_state->pbn), mst_state->pbn_div.full);
+
+ drm_WARN_ON(&i915->drm, remote_tu < crtc_state->dp_m_n.tu);
+ crtc_state->dp_m_n.tu = remote_tu;
slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr,
connector->port,
@@ -115,13 +234,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
return slots;
if (slots >= 0) {
- ret = drm_dp_mst_atomic_check(state);
- /*
- * If we got slots >= 0 and we can fit those based on check
- * then we can exit the loop. Otherwise keep trying.
- */
- if (!ret)
- break;
+ drm_WARN_ON(&i915->drm, slots != crtc_state->dp_m_n.tu);
+
+ break;
}
}
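The remote_tu computation above is a ceiling division done in the 20.12 fixed-point format used by fixed20_12/dfixed_const() (a left shift by 12), converting the payload's PBN into time slots in the branch device's payload table. A standalone sketch of just that rounding, reusing the 528 PBN figure from the earlier sketch and assuming a pbn_div of 40 PBN per time slot (the real value comes from drm_dp_get_vc_payload_bw() and depends on link rate and lane count):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
/* 20.12 fixed point, mirroring dfixed_const() from <drm/drm_fixed.h>. */
#define dfixed_const(x)		((unsigned int)((x) << 12))

int main(void)
{
	unsigned int pbn = 528;				/* from the PBN sketch */
	unsigned int pbn_div = dfixed_const(40);	/* assumed PBN per slot */
	unsigned int remote_tu = DIV_ROUND_UP(dfixed_const(pbn), pbn_div);

	/* ceil(528 / 40) = 14 time slots allocated in the payload table. */
	printf("remote_tu=%u\n", remote_tu);
	return 0;
}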
@@ -136,7 +251,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
if (!dsc)
crtc_state->pipe_bpp = bpp;
else
- crtc_state->dsc.compressed_bpp = bpp;
+ crtc_state->dsc.compressed_bpp_x16 = to_bpp_x16(bpp);
drm_dbg_kms(&i915->drm, "Got %d slots for pipe bpp %d dsc %d\n", slots, bpp, dsc);
}
@@ -148,10 +263,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state,
struct link_config_limits *limits)
{
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
int slots = -EINVAL;
- int link_bpp;
/*
* FIXME: allocate the BW according to link_bpp, which in the case of
@@ -166,16 +278,6 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
if (slots < 0)
return slots;
- link_bpp = intel_dp_output_bpp(crtc_state->output_format, crtc_state->pipe_bpp);
-
- intel_link_compute_m_n(link_bpp,
- crtc_state->lane_count,
- adjusted_mode->crtc_clock,
- crtc_state->port_clock,
- &crtc_state->dp_m_n,
- crtc_state->fec_enable);
- crtc_state->dp_m_n.tu = slots;
-
return 0;
}
@@ -187,15 +289,12 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
int slots = -EINVAL;
int i, num_bpc;
u8 dsc_bpc[3] = {};
int min_bpp, max_bpp, sink_min_bpp, sink_max_bpp;
u8 dsc_max_bpc;
- bool need_timeslot_recalc = false;
- u32 last_compressed_bpp;
+ int min_compressed_bpp, max_compressed_bpp;
/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
if (DISPLAY_VER(i915) >= 12)
@@ -231,45 +330,31 @@ static int intel_dp_dsc_mst_compute_link_config(struct intel_encoder *encoder,
if (max_bpp > sink_max_bpp)
max_bpp = sink_max_bpp;
- min_bpp = max(min_bpp, to_bpp_int_roundup(limits->link.min_bpp_x16));
- max_bpp = min(max_bpp, to_bpp_int(limits->link.max_bpp_x16));
+ max_compressed_bpp = intel_dp_dsc_sink_max_compressed_bpp(connector,
+ crtc_state,
+ max_bpp / 3);
+ max_compressed_bpp = min(max_compressed_bpp,
+ to_bpp_int(limits->link.max_bpp_x16));
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_bpp,
- min_bpp, limits,
- conn_state, 2 * 3, true);
+ min_compressed_bpp = intel_dp_dsc_sink_min_compressed_bpp(crtc_state);
+ min_compressed_bpp = max(min_compressed_bpp,
+ to_bpp_int_roundup(limits->link.min_bpp_x16));
- if (slots < 0)
- return slots;
+ drm_dbg_kms(&i915->drm, "DSC Sink supported compressed min bpp %d compressed max bpp %d\n",
+ min_compressed_bpp, max_compressed_bpp);
- last_compressed_bpp = crtc_state->dsc.compressed_bpp;
+ /* Align compressed bpps according to our own constraints */
+ max_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, max_compressed_bpp,
+ crtc_state->pipe_bpp);
+ min_compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915, min_compressed_bpp,
+ crtc_state->pipe_bpp);
- crtc_state->dsc.compressed_bpp = intel_dp_dsc_nearest_valid_bpp(i915,
- last_compressed_bpp,
- crtc_state->pipe_bpp);
+ slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state, max_compressed_bpp,
+ min_compressed_bpp, limits,
+ conn_state, 1, true);
- if (crtc_state->dsc.compressed_bpp != last_compressed_bpp)
- need_timeslot_recalc = true;
-
- /*
- * Apparently some MST hubs dislike if vcpi slots are not matching precisely
- * the actual compressed bpp we use.
- */
- if (need_timeslot_recalc) {
- slots = intel_dp_mst_find_vcpi_slots_for_bpp(encoder, crtc_state,
- crtc_state->dsc.compressed_bpp,
- crtc_state->dsc.compressed_bpp,
- limits, conn_state, 2 * 3, true);
- if (slots < 0)
- return slots;
- }
-
- intel_link_compute_m_n(crtc_state->dsc.compressed_bpp,
- crtc_state->lane_count,
- adjusted_mode->crtc_clock,
- crtc_state->port_clock,
- &crtc_state->dp_m_n,
- crtc_state->fec_enable);
- crtc_state->dp_m_n.tu = slots;
+ if (slots < 0)
+ return slots;
return 0;
}
@@ -297,7 +382,102 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder,
}
static bool
+intel_dp_mst_dsc_source_support(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ /*
+ * FIXME: Enabling DSC on ICL results in blank screen and FIFO pipe /
+ * transcoder underruns, re-enable DSC after fixing this issue.
+ */
+ return DISPLAY_VER(i915) >= 12 && intel_dsc_source_support(crtc_state);
+}
+
+static int mode_hblank_period_ns(const struct drm_display_mode *mode)
+{
+ return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(mode->htotal - mode->hdisplay,
+ NSEC_PER_SEC / 1000),
+ mode->crtc_clock);
+}
+
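mode_hblank_period_ns() above boils down to (htotal - hdisplay) * 1000000 / crtc_clock with the clock in kHz, i.e. the horizontal blanking duration in nanoseconds, which is then compared against the 300 ns threshold below. A quick standalone check with an ordinary 1080p60 timing (2200/1920, 148500 kHz) gives roughly 1.9 us, so only modes with a very narrow hblank trigger the quirk:

#include <stdio.h>

/* Same arithmetic as mode_hblank_period_ns(): clock in kHz, result in ns
 * (plain truncation here instead of the kernel's DIV_ROUND_CLOSEST_ULL).
 */
static long long hblank_period_ns(int htotal, int hdisplay, int crtc_clock_khz)
{
	return (long long)(htotal - hdisplay) * 1000000 / crtc_clock_khz;
}

int main(void)
{
	/* CEA 1080p60: ~1885 ns of hblank, well above the 300 ns threshold. */
	printf("%lld ns\n", hblank_period_ns(2200, 1920, 148500));
	return 0;
}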
+static bool
+hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state)
+{
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!connector->dp.dsc_hblank_expansion_quirk)
+ return false;
+
+ if (mode_hblank_period_ns(adjusted_mode) > 300)
+ return false;
+
+ return true;
+}
+
+static bool
+adjust_limits_for_dsc_hblank_expansion_quirk(const struct intel_connector *connector,
+ const struct intel_crtc_state *crtc_state,
+ struct link_config_limits *limits,
+ bool dsc)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ int min_bpp_x16 = limits->link.min_bpp_x16;
+
+ if (!hblank_expansion_quirk_needs_dsc(connector, crtc_state))
+ return true;
+
+ if (!dsc) {
+ if (intel_dp_mst_dsc_source_support(crtc_state)) {
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] DSC needed by hblank expansion quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name);
+ return false;
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to 24 due to hblank expansion quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name);
+
+ if (limits->link.max_bpp_x16 < to_bpp_x16(24))
+ return false;
+
+ limits->link.min_bpp_x16 = to_bpp_x16(24);
+
+ return true;
+ }
+
+ drm_WARN_ON(&i915->drm, limits->min_rate != limits->max_rate);
+
+ if (limits->max_rate < 540000)
+ min_bpp_x16 = to_bpp_x16(13);
+ else if (limits->max_rate < 810000)
+ min_bpp_x16 = to_bpp_x16(10);
+
+ if (limits->link.min_bpp_x16 >= min_bpp_x16)
+ return true;
+
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s][CONNECTOR:%d:%s] Increasing link min bpp to " BPP_X16_FMT " in DSC mode due to hblank expansion quirk\n",
+ crtc->base.base.id, crtc->base.name,
+ connector->base.base.id, connector->base.name,
+ BPP_X16_ARGS(min_bpp_x16));
+
+ if (limits->link.max_bpp_x16 < min_bpp_x16)
+ return false;
+
+ limits->link.min_bpp_x16 = min_bpp_x16;
+
+ return true;
+}
+
+static bool
intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
+ const struct intel_connector *connector,
struct intel_crtc_state *crtc_state,
bool dsc,
struct link_config_limits *limits)
@@ -325,10 +505,16 @@ intel_dp_mst_compute_config_limits(struct intel_dp *intel_dp,
intel_dp_adjust_compliance_config(intel_dp, crtc_state, limits);
- return intel_dp_compute_config_link_bpp_limits(intel_dp,
- crtc_state,
- dsc,
- limits);
+ if (!intel_dp_compute_config_link_bpp_limits(intel_dp,
+ crtc_state,
+ dsc,
+ limits))
+ return false;
+
+ return adjust_limits_for_dsc_hblank_expansion_quirk(connector,
+ crtc_state,
+ limits,
+ dsc);
}
static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
@@ -338,12 +524,18 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder);
struct intel_dp *intel_dp = &intel_mst->primary->dp;
+ const struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
struct link_config_limits limits;
bool dsc_needed;
int ret = 0;
+ if (pipe_config->fec_enable &&
+ !intel_dp_supports_fec(intel_dp, connector, pipe_config))
+ return -EINVAL;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -353,6 +545,7 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
dsc_needed = intel_dp->force_dsc_en ||
!intel_dp_mst_compute_config_limits(intel_dp,
+ connector,
pipe_config,
false,
&limits);
@@ -374,7 +567,11 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
str_yes_no(ret),
str_yes_no(intel_dp->force_dsc_en));
+ if (!intel_dp_mst_dsc_source_support(pipe_config))
+ return -EINVAL;
+
if (!intel_dp_mst_compute_config_limits(intel_dp,
+ connector,
pipe_config,
true,
&limits))
@@ -417,7 +614,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
- intel_ddi_compute_min_voltage_level(dev_priv, pipe_config);
+ intel_ddi_compute_min_voltage_level(pipe_config);
+
+ intel_psr_compute_config(intel_dp, pipe_config, conn_state);
return 0;
}
@@ -458,6 +657,130 @@ intel_dp_mst_transcoder_mask(struct intel_atomic_state *state,
return transcoders;
}
+static u8 get_pipes_downstream_of_mst_port(struct intel_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mst_mgr,
+ struct drm_dp_mst_port *parent_port)
+{
+ const struct intel_digital_connector_state *conn_state;
+ struct intel_connector *connector;
+ u8 mask = 0;
+ int i;
+
+ for_each_new_intel_connector_in_state(state, connector, conn_state, i) {
+ if (!conn_state->base.crtc)
+ continue;
+
+ if (&connector->mst_port->mst_mgr != mst_mgr)
+ continue;
+
+ if (connector->port != parent_port &&
+ !drm_dp_mst_port_downstream_of_parent(mst_mgr,
+ connector->port,
+ parent_port))
+ continue;
+
+ mask |= BIT(to_intel_crtc(conn_state->base.crtc)->pipe);
+ }
+
+ return mask;
+}
+
+static int intel_dp_mst_check_fec_change(struct intel_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mst_mgr,
+ struct intel_link_bw_limits *limits)
+{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+ u8 mst_pipe_mask;
+ u8 fec_pipe_mask = 0;
+ int ret;
+
+ mst_pipe_mask = get_pipes_downstream_of_mst_port(state, mst_mgr, NULL);
+
+ for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, mst_pipe_mask) {
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ /* Atomic connector check should've added all the MST CRTCs. */
+ if (drm_WARN_ON(&i915->drm, !crtc_state))
+ return -EINVAL;
+
+ if (crtc_state->fec_enable)
+ fec_pipe_mask |= BIT(crtc->pipe);
+ }
+
+ if (!fec_pipe_mask || mst_pipe_mask == fec_pipe_mask)
+ return 0;
+
+ limits->force_fec_pipes |= mst_pipe_mask;
+
+ ret = intel_modeset_pipes_in_mask_early(state, "MST FEC",
+ mst_pipe_mask);
+
+ return ret ? : -EAGAIN;
+}
+
+static int intel_dp_mst_check_bw(struct intel_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mst_mgr,
+ struct drm_dp_mst_topology_state *mst_state,
+ struct intel_link_bw_limits *limits)
+{
+ struct drm_dp_mst_port *mst_port;
+ u8 mst_port_pipes;
+ int ret;
+
+ ret = drm_dp_mst_atomic_check_mgr(&state->base, mst_mgr, mst_state, &mst_port);
+ if (ret != -ENOSPC)
+ return ret;
+
+ mst_port_pipes = get_pipes_downstream_of_mst_port(state, mst_mgr, mst_port);
+
+ ret = intel_link_bw_reduce_bpp(state, limits,
+ mst_port_pipes, "MST link BW");
+
+ return ret ? : -EAGAIN;
+}
+
+/**
+ * intel_dp_mst_atomic_check_link - check all modeset MST link configuration
+ * @state: intel atomic state
+ * @limits: link BW limits
+ *
+ * Check the link configuration for all modeset MST outputs. If the
+ * configuration is invalid, @limits will be updated if possible to
+ * reduce the total BW, after which the configuration for all CRTCs in
+ * @state must be recomputed with the updated @limits.
+ *
+ * Returns:
+ * - 0 if the configuration is valid
+ * - %-EAGAIN, if the configuration is invalid and @limits got updated
+ * with fallback values with which the configuration of all CRTCs in
+ * @state must be recomputed
+ * - Other negative error, if the configuration is invalid without a
+ * fallback possibility, or the check failed for another reason
+ */
+int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits)
+{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ int ret;
+ int i;
+
+ for_each_new_mst_mgr_in_state(&state->base, mgr, mst_state, i) {
+ ret = intel_dp_mst_check_fec_change(state, mgr, limits);
+ if (ret)
+ return ret;
+
+ ret = intel_dp_mst_check_bw(state, mgr, mst_state,
+ limits);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
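The return-value contract documented for intel_dp_mst_atomic_check_link() implies a retry loop in the caller: on -EAGAIN all CRTC states are recomputed with the tightened limits and the link check is run again, while any other error aborts the atomic check. A standalone toy of that loop (every name except the return codes is a placeholder, and the single bpp limit stands in for struct intel_link_bw_limits):

#include <errno.h>
#include <stdio.h>

/* Placeholder for the driver's per-CRTC compute-config pass. */
static int recompute_crtc_states(int limit_bpp) { return 0; }

/* Placeholder link check: pretend the first pass doesn't fit and asks
 * for a lower bpp, mimicking the 0 / -EAGAIN / error contract above.
 */
static int check_mst_links(int *limit_bpp)
{
	if (*limit_bpp > 24) {
		*limit_bpp = 24;
		return -EAGAIN;
	}
	return 0;
}

int main(void)
{
	int limit_bpp = 30;
	int ret;

	do {
		ret = recompute_crtc_states(limit_bpp);
		if (ret)
			break;
		ret = check_mst_links(&limit_bpp);
	} while (ret == -EAGAIN);	/* limits were reduced: recompute and retry */

	printf("ret=%d limit_bpp=%d\n", ret, limit_bpp);
	return 0;
}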
static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
@@ -478,19 +801,23 @@ static int intel_dp_mst_compute_config_late(struct intel_encoder *encoder,
* that shares the same MST stream as mode changed,
* intel_modeset_pipe_config()+intel_crtc_check_fastset() will take care to do
* a fastset when possible.
+ *
+ * On TGL+ this is required since each stream goes through a master transcoder,
+ * so if the master transcoder needs modeset, all other streams in the
+ * topology need a modeset. All platforms need to add the atomic state
+ * for all streams in the topology, since a modeset on one may require
+ * changing the MST link BW usage of the others, which in turn needs a
+ * recomputation of the corresponding CRTC states.
*/
static int
-intel_dp_mst_atomic_master_trans_check(struct intel_connector *connector,
- struct intel_atomic_state *state)
+intel_dp_mst_atomic_topology_check(struct intel_connector *connector,
+ struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector_iter;
int ret = 0;
- if (DISPLAY_VER(dev_priv) < 12)
- return 0;
-
if (!intel_connector_needs_modeset(state, &connector->base))
return 0;
@@ -544,7 +871,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector,
if (ret)
return ret;
- ret = intel_dp_mst_atomic_master_trans_check(intel_connector, state);
+ ret = intel_dp_mst_atomic_topology_check(intel_connector, state);
if (ret)
return ret;
@@ -586,10 +913,6 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
struct intel_connector *connector =
to_intel_connector(old_conn_state->connector);
- struct drm_dp_mst_topology_state *new_mst_state =
- drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
- struct drm_dp_mst_atomic_payload *new_payload =
- drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
drm_dbg_kms(&i915->drm, "active links %d\n",
@@ -597,9 +920,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
intel_hdcp_disable(intel_mst->connector);
- drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
-
- intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
+ intel_dp_sink_disable_decompression(state, connector, old_crtc_state);
}
static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
@@ -633,6 +954,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_disable_transcoder(old_crtc_state);
+ drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
+
clear_act_sent(encoder, old_crtc_state);
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
@@ -645,6 +968,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_ddi_disable_transcoder_func(old_crtc_state);
+ intel_dsc_disable(old_crtc_state);
+
if (DISPLAY_VER(dev_priv) >= 9)
skl_scaler_disable(old_crtc_state);
else
@@ -661,9 +986,8 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
* BSpec 4287: disable DIP after the transcoder is disabled and before
* the transcoder clock select is set to none.
*/
- if (last_mst_stream)
- intel_dp_set_infoframes(&dig_port->base, false,
- old_crtc_state, NULL);
+ intel_dp_set_infoframes(&dig_port->base, false,
+ old_crtc_state, NULL);
/*
* From TGL spec: "If multi-stream slave transcoder: Configure
* Transcoder Clock Select to direct no clock to the transcoder"
@@ -753,6 +1077,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
drm_dp_send_power_updown_phy(&intel_dp->mst_mgr, connector->port, true);
+ intel_dp_sink_enable_decompression(state, connector, pipe_config);
+
if (first_mst_stream)
dig_port->base.pre_enable(state, &dig_port->base,
pipe_config, NULL);
@@ -775,6 +1101,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) < 12 || !first_mst_stream)
intel_ddi_enable_transcoder_clock(encoder, pipe_config);
+ intel_dsc_dp_pps_write(&dig_port->base, pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
}
@@ -791,11 +1118,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
struct drm_dp_mst_topology_state *mst_state =
drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
enum transcoder trans = pipe_config->cpu_transcoder;
+ bool first_mst_stream = intel_dp->active_mst_links == 1;
drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
- clear_act_sent(encoder, pipe_config);
-
if (intel_dp_is_uhbr(pipe_config)) {
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
@@ -809,6 +1135,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, pipe_config);
+ clear_act_sent(encoder, pipe_config);
+
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0,
TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
@@ -817,15 +1145,16 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
wait_for_act_sent(encoder, pipe_config);
+ if (first_mst_stream)
+ intel_ddi_wait_for_fec_status(encoder, pipe_config, true);
+
drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base,
drm_atomic_get_mst_payload_state(mst_state, connector->port));
- if (DISPLAY_VER(dev_priv) >= 14 && pipe_config->fec_enable)
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(trans), 0,
- FECSTALL_DIS_DPTSTREAM_DPTTG);
- else if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
- FECSTALL_DIS_DPTSTREAM_DPTTG);
+ if (DISPLAY_VER(dev_priv) >= 12)
+ intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, trans),
+ FECSTALL_DIS_DPTSTREAM_DPTTG,
+ pipe_config->fec_enable ? FECSTALL_DIS_DPTSTREAM_DPTTG : 0);
intel_audio_sdp_split_update(pipe_config);
@@ -833,12 +1162,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_crtc_vblank_on(pipe_config);
- intel_audio_codec_enable(encoder, pipe_config, conn_state);
-
- /* Enable hdcp if it's desired */
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED)
- intel_hdcp_enable(state, encoder, pipe_config, conn_state);
+ intel_hdcp_enable(state, encoder, pipe_config, conn_state);
}
static bool intel_dp_mst_enc_get_hw_state(struct intel_encoder *encoder,
@@ -977,6 +1301,18 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
if (ret)
return ret;
+ /*
+ * TODO:
+ * - Also check if compression would allow for the mode
+ * - Calculate the overhead using drm_dp_bw_overhead() /
+ * drm_dp_bw_channel_coding_efficiency(), similarly to the
+ * compute config code, as drm_dp_calc_pbn_mode() doesn't
+ * account for all the overheads.
+ * - Check here and during compute config the BW reported by
+ * DFP_Link_Available_Payload_Bandwidth_Number (or the
+ * corresponding link capabilities of the sink) in case the
+ * stream is uncompressed for it by the last branch device.
+ */
if (mode_rate > max_rate || mode->clock > max_dotclk ||
drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
*status = MODE_CLOCK_HIGH;
@@ -1002,7 +1338,7 @@ intel_dp_mst_mode_valid_ctx(struct drm_connector *connector,
return 0;
}
- if (DISPLAY_VER(dev_priv) >= 10 &&
+ if (HAS_DSC_MST(dev_priv) &&
drm_dp_sink_supports_dsc(intel_connector->dp.dsc_dpcd)) {
/*
* TBD pass the connector BPC,
@@ -1150,6 +1486,36 @@ intel_dp_mst_read_decompression_port_dsc_caps(struct intel_dp *intel_dp,
intel_dp_get_dsc_sink_cap(dpcd_caps[DP_DPCD_REV], connector);
}
+static bool detect_dsc_hblank_expansion_quirk(const struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct drm_dp_desc desc;
+ u8 dpcd[DP_RECEIVER_CAP_SIZE];
+
+ if (!connector->dp.dsc_decompression_aux)
+ return false;
+
+ if (drm_dp_read_desc(connector->dp.dsc_decompression_aux,
+ &desc, true) < 0)
+ return false;
+
+ if (!drm_dp_has_quirk(&desc,
+ DP_DPCD_QUIRK_HBLANK_EXPANSION_REQUIRES_DSC))
+ return false;
+
+ if (drm_dp_read_dpcd_caps(connector->dp.dsc_decompression_aux, dpcd) < 0)
+ return false;
+
+ if (!(dpcd[DP_RECEIVE_PORT_0_CAP_0] & DP_HBLANK_EXPANSION_CAPABLE))
+ return false;
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] DSC HBLANK expansion quirk detected\n",
+ connector->base.base.id, connector->base.name);
+
+ return true;
+}
+
static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
const char *pathprop)
@@ -1168,17 +1534,15 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
return NULL;
intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
+ intel_connector->sync_state = intel_dp_connector_sync_state;
intel_connector->mst_port = intel_dp;
intel_connector->port = port;
drm_dp_mst_get_port_malloc(port);
- /*
- * TODO: set the AUX for the actual MST port decompressing the stream.
- * At the moment the driver only supports enabling this globally in the
- * first downstream MST branch, via intel_dp's (root port) AUX.
- */
- intel_connector->dp.dsc_decompression_aux = &intel_dp->aux;
+ intel_connector->dp.dsc_decompression_aux = drm_dp_mst_dsc_aux_for_port(port);
intel_dp_mst_read_decompression_port_dsc_caps(intel_dp, intel_connector);
+ intel_connector->dp.dsc_hblank_expansion_quirk =
+ detect_dsc_hblank_expansion_quirk(intel_connector);
connector = &intel_connector->base;
ret = drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs,
@@ -1271,6 +1635,8 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *dig_port, enum pipe
intel_encoder->pre_pll_enable = intel_mst_pre_pll_enable_dp;
intel_encoder->pre_enable = intel_mst_pre_enable_dp;
intel_encoder->enable = intel_mst_enable_dp;
+ intel_encoder->audio_enable = intel_audio_codec_enable;
+ intel_encoder->audio_disable = intel_audio_codec_disable;
intel_encoder->get_hw_state = intel_dp_mst_enc_get_hw_state;
intel_encoder->get_config = intel_dp_mst_enc_get_config;
intel_encoder->initial_fastset_check = intel_dp_mst_initial_fastset_check;
@@ -1418,3 +1784,91 @@ int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
return 0;
}
+
+static struct intel_connector *
+get_connector_in_state_for_crtc(struct intel_atomic_state *state,
+ const struct intel_crtc *crtc)
+{
+ struct drm_connector_state *old_conn_state;
+ struct drm_connector_state *new_conn_state;
+ struct drm_connector *_connector;
+ int i;
+
+ for_each_oldnew_connector_in_state(&state->base, _connector,
+ old_conn_state, new_conn_state, i) {
+ struct intel_connector *connector =
+ to_intel_connector(_connector);
+
+ if (old_conn_state->crtc == &crtc->base ||
+ new_conn_state->crtc == &crtc->base)
+ return connector;
+ }
+
+ return NULL;
+}
+
+/**
+ * intel_dp_mst_crtc_needs_modeset - check if changes in topology need to modeset the given CRTC
+ * @state: atomic state
+ * @crtc: CRTC for which to check the modeset requirement
+ *
+ * Check if any change in an MST topology requires a forced modeset on @crtc in
+ * this topology. One such change is enabling/disabling the DSC decompression
+ * state in the first branch device's UFP DPCD as required by another CRTC in
+ * the same topology, while @crtc is still active; this requires a full modeset
+ * on @crtc.
+ */
+bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_connector *crtc_connector;
+ const struct drm_connector_state *conn_state;
+ const struct drm_connector *_connector;
+ int i;
+
+ if (!intel_crtc_has_type(intel_atomic_get_new_crtc_state(state, crtc),
+ INTEL_OUTPUT_DP_MST))
+ return false;
+
+ crtc_connector = get_connector_in_state_for_crtc(state, crtc);
+
+ if (!crtc_connector)
+ /* None of the connectors in the topology needs modeset */
+ return false;
+
+ for_each_new_connector_in_state(&state->base, _connector, conn_state, i) {
+ const struct intel_connector *connector =
+ to_intel_connector(_connector);
+ const struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc *crtc_iter;
+
+ if (connector->mst_port != crtc_connector->mst_port ||
+ !conn_state->crtc)
+ continue;
+
+ crtc_iter = to_intel_crtc(conn_state->crtc);
+
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc_iter);
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc_iter);
+
+ if (!intel_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ if (old_crtc_state->dsc.compression_enable ==
+ new_crtc_state->dsc.compression_enable)
+ continue;
+ /*
+ * Toggling the decompression flag because of this stream in
+ * the first downstream branch device's UFP DPCD may reset the
+ * whole branch device. To avoid the reset while other streams
+ * are also active, modeset the whole MST topology in this
+ * case.
+ */
+ if (connector->dp.dsc_decompression_aux ==
+ &connector->mst_port->aux)
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index f1815bb722..8ca1d59909 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -13,6 +13,7 @@ struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
+struct intel_link_bw_limits;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
@@ -22,5 +23,9 @@ bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
int intel_dp_mst_add_topology_state_for_crtc(struct intel_atomic_state *state,
struct intel_crtc *crtc);
+int intel_dp_mst_atomic_check_link(struct intel_atomic_state *state,
+ struct intel_link_bw_limits *limits);
+bool intel_dp_mst_crtc_needs_modeset(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 62b93d097e..4ca910874a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -666,6 +666,20 @@ enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port)
}
}
+enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
+{
+ switch (pipe) {
+ default:
+ MISSING_CASE(pipe);
+ fallthrough;
+ case PIPE_A:
+ case PIPE_B:
+ return DPIO_PHY0;
+ case PIPE_C:
+ return DPIO_PHY1;
+ }
+}
+
enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
{
switch (pipe) {
@@ -689,50 +703,50 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 val;
int i;
vlv_dpio_get(dev_priv);
/* Clear calc init */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
}
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW9(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW9(ch));
val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW9(ch), val);
}
/* Program swing deemph */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
+ val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW4(ch, i));
val &= ~DPIO_SWING_DEEMPH9P5_MASK;
val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW4(ch, i), val);
}
/* Program swing margin */
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
+ val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW2(ch, i));
val &= ~DPIO_SWING_MARGIN000_MASK;
val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
@@ -745,7 +759,7 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW2(ch, i), val);
}
/*
@@ -755,23 +769,23 @@ void chv_set_phy_signal_level(struct intel_encoder *encoder,
* 27 for ch0 and ch1.
*/
for (i = 0; i < crtc_state->lane_count; i++) {
- val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
+ val = vlv_dpio_read(dev_priv, phy, CHV_TX_DW3(ch, i));
if (uniq_trans_scale)
val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
else
val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW3(ch, i), val);
}
/* Start swing calculation */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW10(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW10(ch));
val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW10(ch), val);
}
vlv_dpio_put(dev_priv);
@@ -782,43 +796,43 @@ void chv_data_lane_soft_reset(struct intel_encoder *encoder,
bool reset)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum pipe pipe = crtc->pipe;
+ enum dpio_channel ch = vlv_dig_port_to_channel(enc_to_dig_port(encoder));
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 val;
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW0(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW0(ch));
if (reset)
val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
else
val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW0(ch), val);
}
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW1(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW1(ch));
val |= CHV_PCS_REQ_SOFTRESET_EN;
if (reset)
val &= ~DPIO_PCS_CLK_SOFT_RESET;
else
val |= DPIO_PCS_CLK_SOFT_RESET;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW1(ch), val);
}
}
@@ -829,6 +843,7 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
unsigned int lane_mask =
intel_dp_unused_lane_mask(crtc_state->lane_count);
@@ -851,40 +866,40 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
/* program left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA1_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA1_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
if (ch == DPIO_CH0)
val |= CHV_BUFLEFTENA2_FORCE;
if (ch == DPIO_CH1)
val |= CHV_BUFRIGHTENA2_FORCE;
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
}
/* program clock channel usage */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW8(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW8(ch));
val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
if (pipe != PIPE_B)
val &= ~CHV_PCS_USEDCLKCHANNEL;
else
val |= CHV_PCS_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW8(ch), val);
}
/*
@@ -892,12 +907,12 @@ void chv_phy_pre_pll_enable(struct intel_encoder *encoder,
* matches the pipe, but here we need to
* pick the CL based on the port.
*/
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW19(ch));
if (pipe != PIPE_B)
val &= ~CHV_CMN_USEDCLKCHANNEL;
else
val |= CHV_CMN_USEDCLKCHANNEL;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW19(ch), val);
vlv_dpio_put(dev_priv);
}
@@ -910,21 +925,21 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel ch = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
int data, i, stagger;
u32 val;
vlv_dpio_get(dev_priv);
/* allow hardware to manage TX FIFO reset source */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
}
/* Program Tx lane latency optimal setting*/
@@ -934,7 +949,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
data = 0x0;
else
data = (i == 1) ? 0x0 : 0x1;
- vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
+ vlv_dpio_write(dev_priv, phy, CHV_TX_DW14(ch, i),
data << DPIO_UPAR_SHIFT);
}
@@ -950,17 +965,17 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
else
stagger = 0x2;
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW11(ch), val);
if (crtc_state->lane_count > 2) {
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS23_DW11(ch));
val |= DPIO_TX2_STAGGER_MASK(0x1f);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW11(ch), val);
}
- vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS01_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -968,7 +983,7 @@ void chv_phy_pre_encoder_enable(struct intel_encoder *encoder,
DPIO_TX2_STAGGER_MULT(0));
if (crtc_state->lane_count > 2) {
- vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS23_DW12(ch),
DPIO_LANESTAGGER_STRAP(stagger) |
DPIO_LANESTAGGER_STRAP_OVRD |
DPIO_TX1_STAGGER_MASK(0x1f) |
@@ -998,19 +1013,20 @@ void chv_phy_post_pll_disable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(old_crtc_state->uapi.crtc)->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* disable left/right clock distribution */
if (pipe != PIPE_B) {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW5_CH0);
val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW5_CH0, val);
} else {
- val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
+ val = vlv_dpio_read(dev_priv, phy, _CHV_CMN_DW1_CH1);
val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
- vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
+ vlv_dpio_write(dev_priv, phy, _CHV_CMN_DW1_CH1, val);
}
vlv_dpio_put(dev_priv);
@@ -1036,22 +1052,22 @@ void vlv_set_phy_signal_level(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW4(port), demph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW2(port),
uniqtranscale_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW3(port), 0x0C782040);
if (tx3_demph)
- vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
+ vlv_dpio_write(dev_priv, phy, VLV_TX3_DW4(port), tx3_demph);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW11(port), 0x00030000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW9(port), preemph_reg_value);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
vlv_dpio_put(dev_priv);
}
@@ -1063,24 +1079,24 @@ void vlv_phy_pre_pll_enable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
/* Program Tx lane resets to default */
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port),
DPIO_PCS_TX_LANE2_RESET |
DPIO_PCS_TX_LANE1_RESET);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port),
DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
(1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
DPIO_PCS_CLK_SOFT_RESET);
/* Fix up inter-pair skew failure */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
- vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW12(port), 0x00750f00);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW11(port), 0x00001500);
+ vlv_dpio_write(dev_priv, phy, VLV_TX_DW14(port), 0x40400000);
vlv_dpio_put(dev_priv);
}
@@ -1094,23 +1110,24 @@ void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
vlv_dpio_get(dev_priv);
/* Enable clock channels for this port */
- val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
+ val = vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW8(port));
val = 0;
if (pipe)
val |= (1<<21);
else
val &= ~(1<<21);
val |= 0x001000c4;
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW8(port), val);
/* Program lane clock */
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW14(port), 0x00760018);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW23(port), 0x00400888);
vlv_dpio_put(dev_priv);
}
@@ -1122,10 +1139,10 @@ void vlv_phy_reset_lanes(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum dpio_channel port = vlv_dig_port_to_channel(dig_port);
- enum pipe pipe = crtc->pipe;
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
vlv_dpio_get(dev_priv);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
- vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW0(port), 0x00000000);
+ vlv_dpio_write(dev_priv, phy, VLV_PCS_DW1(port), 0x00e00060);
vlv_dpio_put(dev_priv);
}
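
Every hunk in this file follows the same pattern: the DPIO accessors are re-keyed from the pipe to the PHY, with vlv_pipe_to_phy() mapping pipes A/B to PHY0 and pipe C to PHY1. A condensed illustration of the converted shape (the helper name is made up for the example):

/* Illustrative helper: map the pipe to its PHY once, then key every
 * vlv_dpio_read()/vlv_dpio_write() off the PHY rather than the pipe.
 */
static u32 example_read_pcs01_dw10(struct drm_i915_private *dev_priv,
				   struct intel_crtc *crtc,
				   enum dpio_channel ch)
{
	enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);

	return vlv_dpio_read(dev_priv, phy, VLV_PCS01_DW10(ch));
}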
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index 4d43dbbdf8..9adc4e8c17 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -44,6 +44,7 @@ u8 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder);
enum dpio_channel vlv_dig_port_to_channel(struct intel_digital_port *dig_port);
enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_port);
+enum dpio_phy vlv_pipe_to_phy(enum pipe pipe);
enum dpio_channel vlv_pipe_to_channel(enum pipe pipe);
void chv_set_phy_signal_level(struct intel_encoder *encoder,
@@ -116,6 +117,10 @@ static inline enum dpio_phy vlv_dig_port_to_phy(struct intel_digital_port *dig_p
{
return DPIO_PHY0;
}
+static inline enum dpio_phy vlv_pipe_to_phy(enum pipe pipe)
+{
+ return DPIO_PHY0;
+}
static inline enum dpio_channel vlv_pipe_to_channel(enum pipe pipe)
{
return DPIO_CH0;
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index d41c1dc9f6..3038655377 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -16,6 +16,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
+#include "intel_lvds_regs.h"
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_snps_phy.h"
@@ -311,7 +312,7 @@ static const struct intel_limit intel_limits_bxt = {
* divided-down version of it.
*/
/* m1 is reserved as 0 in Pineview, n is a ring counter */
-int pnv_calc_dpll_params(int refclk, struct dpll *clock)
+static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
@@ -342,7 +343,7 @@ int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
-int vlv_calc_dpll_params(int refclk, struct dpll *clock)
+static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
clock->m = clock->m1 * clock->m2;
clock->p = clock->p1 * clock->p2 * 5;
@@ -368,6 +369,176 @@ int chv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
+static int i9xx_pll_refclk(struct drm_device *dev,
+ const struct intel_crtc_state *pipe_config)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+
+ if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
+ return dev_priv->display.vbt.lvds_ssc_freq;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ return 120000;
+ else if (DISPLAY_VER(dev_priv) != 2)
+ return 96000;
+ else
+ return 48000;
+}
+
+/* Returns the clock of the currently programmed mode of the given pipe. */
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u32 dpll = pipe_config->dpll_hw_state.dpll;
+ u32 fp;
+ struct dpll clock;
+ int port_clock;
+ int refclk = i9xx_pll_refclk(dev, pipe_config);
+
+ if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
+ fp = pipe_config->dpll_hw_state.fp0;
+ else
+ fp = pipe_config->dpll_hw_state.fp1;
+
+ clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
+ if (IS_PINEVIEW(dev_priv)) {
+ clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
+ clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ } else {
+ clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
+ clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
+ }
+
+ if (DISPLAY_VER(dev_priv) != 2) {
+ if (IS_PINEVIEW(dev_priv))
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
+ else
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ switch (dpll & DPLL_MODE_MASK) {
+ case DPLLB_MODE_DAC_SERIAL:
+ clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
+ 5 : 10;
+ break;
+ case DPLLB_MODE_LVDS:
+ clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
+ 7 : 14;
+ break;
+ default:
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ return;
+ }
+
+ if (IS_PINEVIEW(dev_priv))
+ port_clock = pnv_calc_dpll_params(refclk, &clock);
+ else
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
+ } else {
+ enum pipe lvds_pipe;
+
+ if (IS_I85X(dev_priv) &&
+ intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ lvds_pipe == crtc->pipe) {
+ u32 lvds = intel_de_read(dev_priv, LVDS);
+
+ clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT);
+
+ if (lvds & LVDS_CLKB_POWER_UP)
+ clock.p2 = 7;
+ else
+ clock.p2 = 14;
+ } else {
+ if (dpll & PLL_P1_DIVIDE_BY_TWO)
+ clock.p1 = 2;
+ else {
+ clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
+ DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
+ }
+ if (dpll & PLL_P2_DIVIDE_BY_4)
+ clock.p2 = 4;
+ else
+ clock.p2 = 2;
+ }
+
+ port_clock = i9xx_calc_dpll_params(refclk, &clock);
+ }
+
+ /*
+ * This value includes pixel_multiplier. We will use
+ * port_clock to compute adjusted_mode.crtc_clock in the
+ * encoder's get_config() function.
+ */
+ pipe_config->port_clock = port_clock;
+}
+
+void vlv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ struct dpll clock;
+ u32 mdiv;
+ int refclk = 100000;
+
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ vlv_dpio_get(dev_priv);
+ mdiv = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW3(crtc->pipe));
+ vlv_dpio_put(dev_priv);
+
+ clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
+ clock.m2 = mdiv & DPIO_M2DIV_MASK;
+ clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
+ clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
+ clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
+
+ pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
+}
+
+void chv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ enum dpio_channel port = vlv_pipe_to_channel(crtc->pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
+ struct dpll clock;
+ u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
+ int refclk = 100000;
+
+ /* In case of DSI, DPLL will not be used */
+ if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
+ return;
+
+ vlv_dpio_get(dev_priv);
+ cmn_dw13 = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW13(port));
+ pll_dw0 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW0(port));
+ pll_dw1 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW1(port));
+ pll_dw2 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW2(port));
+ pll_dw3 = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
+ vlv_dpio_put(dev_priv);
+
+ clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
+ clock.m2 = (pll_dw0 & 0xff) << 22;
+ if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
+ clock.m2 |= pll_dw2 & 0x3fffff;
+ clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
+ clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
+ clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
+
+ pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
+}
+
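To make the readout arithmetic above concrete, a worked example for the VLV path, assuming vlv_calc_dpll_params() follows the usual vco = refclk * m / n, dot = vco / p computation; the divider values are made up for illustration:

/* Worked example (illustrative divider values only):
 * refclk = 100000 kHz, m1 = 2, m2 = 81, n = 6, p1 = 3, p2 = 2
 *
 *   m   = m1 * m2        = 162
 *   p   = p1 * p2 * 5    = 30
 *   vco = refclk * m / n = 100000 * 162 / 6 = 2700000 kHz
 *   dot = vco / p        = 2700000 / 30     = 90000 kHz
 *
 * The result lands in pipe_config->port_clock (in kHz), which the
 * encoder's get_config() later uses to derive adjusted_mode.crtc_clock.
 */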
/*
* Returns whether the given set of divisors are valid for a given refclk with
* the given connectors.
@@ -1003,12 +1174,10 @@ static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- enum phy phy = intel_port_to_phy(i915, encoder->port);
int ret;
ret = intel_cx0pll_calc_state(crtc_state, encoder);
@@ -1016,10 +1185,7 @@ static int mtl_crtc_compute_clock(struct intel_atomic_state *state,
return ret;
/* TODO: Do the readback via intel_compute_shared_dplls() */
- if (intel_is_c10phy(i915, phy))
- crtc_state->port_clock = intel_c10pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c10);
- else
- crtc_state->port_clock = intel_c20pll_calc_port_clock(encoder, &crtc_state->cx0pll_state.c20);
+ crtc_state->port_clock = intel_cx0pll_calc_port_clock(encoder, &crtc_state->cx0pll_state);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
@@ -1645,7 +1811,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
}
static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
- enum pipe pipe)
+ enum dpio_phy phy)
{
u32 reg_val;
@@ -1653,30 +1819,31 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
 * PLLB opamp always calibrates to the max value of 0x3f; force enable it
 * and set it to a reasonable value instead.
*/
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
reg_val |= 0x00000030;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
reg_val &= 0x00ffffff;
reg_val |= 0x8c000000;
- vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW9(1));
reg_val &= 0xffffff00;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9(1), reg_val);
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_REF_DW13);
reg_val &= 0x00ffffff;
reg_val |= 0xb0000000;
- vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_REF_DW13, reg_val);
}
static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
enum pipe pipe = crtc->pipe;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -1694,18 +1861,18 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
/* PLL B needs special handling */
if (pipe == PIPE_B)
- vlv_pllb_recal_opamp(dev_priv, pipe);
+ vlv_pllb_recal_opamp(dev_priv, phy);
/* Set up Tx target for periodic Rcomp update */
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW9_BCAST, 0x0100000f);
/* Disable target IRef on PLL */
- reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
+ reg_val = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW8(pipe));
reg_val &= 0x00ffffff;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW8(pipe), reg_val);
/* Disable fast lock */
- vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
+ vlv_dpio_write(dev_priv, phy, VLV_CMN_DW0, 0x610);
/* Set idtafcrecal before PLL is enabled */
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
@@ -1719,46 +1886,46 @@ static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
* Note: don't use the DAC post divider as it seems unstable.
*/
mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
mdiv |= DPIO_ENABLE_CALIBRATION;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
if (crtc_state->port_clock == 162000 ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
0x009f0003);
else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW10(pipe),
0x00d0000f);
if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df40000);
else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df70000);
} else { /* HDMI or VGA */
/* Use bend source */
if (pipe == PIPE_A)
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df70000);
else
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW5(pipe),
0x0df40000);
}
- coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
+ coreclk = vlv_dpio_read(dev_priv, phy, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW7(pipe), coreclk);
- vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
+ vlv_dpio_write(dev_priv, phy, VLV_PLL_DW11(pipe), 0x87871000);
vlv_dpio_put(dev_priv);
}
@@ -1809,6 +1976,7 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 loopfilter, tribuf_calcntr;
u32 bestm2, bestp1, bestp2, bestm2_frac;
u32 dpio_val;
@@ -1825,39 +1993,39 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
vlv_dpio_get(dev_priv);
/* p1 and p2 divider */
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW13(port),
5 << DPIO_CHV_S1_DIV_SHIFT |
bestp1 << DPIO_CHV_P1_DIV_SHIFT |
bestp2 << DPIO_CHV_P2_DIV_SHIFT |
1 << DPIO_CHV_K_DIV_SHIFT);
/* Feedback post-divider - m2 */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW0(port), bestm2);
/* Feedback refclk divider - n and m1 */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW1(port),
DPIO_CHV_M1_DIV_BY_2 |
1 << DPIO_CHV_N_DIV_SHIFT);
/* M2 fraction division */
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW2(port), bestm2_frac);
/* M2 fraction division enable */
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
+ dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW3(port));
dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
if (bestm2_frac)
dpio_val |= DPIO_CHV_FRAC_DIV_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW3(port), dpio_val);
/* Program digital lock detect threshold */
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
+ dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW9(port));
dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
if (!bestm2_frac)
dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW9(port), dpio_val);
/* Loop filter */
if (vco == 5400000) {
@@ -1882,16 +2050,16 @@ static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
tribuf_calcntr = 0;
}
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW6(port), loopfilter);
- dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
+ dpio_val = vlv_dpio_read(dev_priv, phy, CHV_PLL_DW8(port));
dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
- vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
+ vlv_dpio_write(dev_priv, phy, CHV_PLL_DW8(port), dpio_val);
/* AFC Recal */
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
- vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port),
+ vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port)) |
DPIO_AFC_RECAL);
vlv_dpio_put(dev_priv);
@@ -1903,14 +2071,15 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(crtc->pipe);
u32 tmp;
vlv_dpio_get(dev_priv);
/* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), tmp);
vlv_dpio_put(dev_priv);
@@ -2031,6 +2200,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
u32 val;
/* Make sure the pipe isn't still relying on us */
@@ -2047,9 +2217,9 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
vlv_dpio_get(dev_priv);
/* Disable 10bit clock to display controller */
- val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ val = vlv_dpio_read(dev_priv, phy, CHV_CMN_DW14(port));
val &= ~DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
+ vlv_dpio_write(dev_priv, phy, CHV_CMN_DW14(port), val);
vlv_dpio_put(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index bbc30542f2..ac01bb19cc 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -20,8 +20,6 @@ int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc);
-int vlv_calc_dpll_params(int refclk, struct dpll *clock);
-int pnv_calc_dpll_params(int refclk, struct dpll *clock);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
@@ -41,6 +39,13 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
+void i9xx_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void vlv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+void chv_crtc_clock_get(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config);
+
void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index fb694ff9dc..87deb135c9 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -219,6 +219,26 @@ intel_tc_pll_enable_reg(struct drm_i915_private *i915,
return MG_PLL_ENABLE(tc_port);
}
+static void _intel_enable_shared_dpll(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ if (pll->info->power_domain)
+ pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
+
+ pll->info->funcs->enable(i915, pll);
+ pll->on = true;
+}
+
+static void _intel_disable_shared_dpll(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ pll->info->funcs->disable(i915, pll);
+ pll->on = false;
+
+ if (pll->info->power_domain)
+ intel_display_power_put(i915, pll->info->power_domain, pll->wakeref);
+}
+
/**
* intel_enable_shared_dpll - enable a CRTC's shared DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL
@@ -258,8 +278,8 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
drm_WARN_ON(&i915->drm, pll->on);
drm_dbg_kms(&i915->drm, "enabling %s\n", pll->info->name);
- pll->info->funcs->enable(i915, pll);
- pll->on = true;
+
+ _intel_enable_shared_dpll(i915, pll);
out:
mutex_unlock(&i915->display.dpll.lock);
@@ -304,8 +324,8 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
goto out;
drm_dbg_kms(&i915->drm, "disabling %s\n", pll->info->name);
- pll->info->funcs->disable(i915, pll);
- pll->on = false;
+
+ _intel_disable_shared_dpll(i915, pll);
out:
mutex_unlock(&i915->display.dpll.lock);
@@ -3844,18 +3864,6 @@ static void combo_pll_enable(struct drm_i915_private *i915,
{
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
-
- /*
- * We need to disable DC states when this DPLL is enabled.
- * This can be done by taking a reference on DPLL4 power
- * domain.
- */
- pll->wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_DC_OFF);
- }
-
icl_pll_power_enable(i915, pll, enable_reg);
icl_dpll_write(i915, pll);
@@ -3951,11 +3959,6 @@ static void combo_pll_disable(struct drm_i915_private *i915,
i915_reg_t enable_reg = intel_combo_pll_enable_reg(i915, pll);
icl_pll_disable(i915, pll, enable_reg);
-
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
- pll->info->id == DPLL_ID_EHL_DPLL4)
- intel_display_power_put(i915, POWER_DOMAIN_DC_OFF,
- pll->wakeref);
}
static void tbt_pll_disable(struct drm_i915_private *i915,
@@ -4048,7 +4051,8 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
static const struct dpll_info ehl_plls[] = {
{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
{ .name = "DPLL 1", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL1, },
- { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4, },
+ { .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
+ .power_domain = POWER_DOMAIN_DC_OFF, },
{}
};
@@ -4378,12 +4382,8 @@ static void readout_dpll_hw_state(struct drm_i915_private *i915,
pll->on = intel_dpll_get_hw_state(i915, pll, &pll->state.hw_state);
- if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
- pll->on &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
- pll->wakeref = intel_display_power_get(i915,
- POWER_DOMAIN_DC_OFF);
- }
+ if (pll->on && pll->info->power_domain)
+ pll->wakeref = intel_display_power_get(i915, pll->info->power_domain);
pll->state.pipe_mask = 0;
for_each_intel_crtc(&i915->drm, crtc) {
@@ -4430,8 +4430,7 @@ static void sanitize_dpll_state(struct drm_i915_private *i915,
"%s enabled but not in use, disabling\n",
pll->info->name);
- pll->info->funcs->disable(i915, pll);
- pll->on = false;
+ _intel_disable_shared_dpll(i915, pll);
}
void intel_dpll_sanitize_state(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 1a88ccc983..5cdec77cfd 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -27,6 +27,7 @@
#include <linux/types.h>
+#include "intel_display_power.h"
#include "intel_wakeref.h"
#define for_each_shared_dpll(__i915, __pll, __i) \
@@ -270,6 +271,11 @@ struct dpll_info {
*/
enum intel_dpll_id id;
+ /**
+ * @power_domain: extra power domain required by the DPLL
+ */
+ enum intel_display_power_domain power_domain;
+
#define INTEL_DPLL_ALWAYS_ON (1 << 0)
#define INTEL_DPLL_IS_ALT_PORT_DPLL (1 << 1)
/**
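
With the new @power_domain field, the EHL/JSL DPLL4 special case above reduces to a table entry; the wakeref is then acquired and released by _intel_enable_shared_dpll() and _intel_disable_shared_dpll(). A hypothetical entry, for illustration:

/* Hypothetical dpll_info table (illustrative): a PLL that must keep DC
 * states disabled while it is enabled just declares the extra power
 * domain; no special-casing remains in the enable/disable hooks.
 */
static const struct dpll_info example_plls[] = {
	{ .name = "DPLL 0", .funcs = &combo_pll_funcs, .id = DPLL_ID_ICL_DPLL0, },
	{ .name = "DPLL 4", .funcs = &combo_pll_funcs, .id = DPLL_ID_EHL_DPLL4,
	  .power_domain = POWER_DOMAIN_DC_OFF, },
	{}
};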
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index 48582b31b7..b29bceff73 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -9,8 +9,6 @@
#include "gt/gen8_ppgtt.h"
#include "i915_drv.h"
-#include "i915_reg.h"
-#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
@@ -318,25 +316,3 @@ void intel_dpt_destroy(struct i915_address_space *vm)
i915_vm_put(&dpt->vm);
}
-void intel_dpt_configure(struct intel_crtc *crtc)
-{
- struct drm_i915_private *i915 = to_i915(crtc->base.dev);
-
- if (DISPLAY_VER(i915) == 14) {
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
- continue;
-
- intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
- PLANE_CHICKEN_DISABLE_DPT,
- i915->params.enable_dpt ? 0 : PLANE_CHICKEN_DISABLE_DPT);
- }
- } else if (DISPLAY_VER(i915) == 13) {
- intel_de_rmw(i915, CHICKEN_MISC_2,
- CHICKEN_MISC_DISABLE_DPT,
- i915->params.enable_dpt ? 0 : CHICKEN_MISC_DISABLE_DPT);
- }
-}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index d9a1665501..e18a9f767b 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -10,7 +10,6 @@ struct drm_i915_private;
struct i915_address_space;
struct i915_vma;
-struct intel_crtc;
struct intel_framebuffer;
void intel_dpt_destroy(struct i915_address_space *vm);
@@ -20,6 +19,5 @@ void intel_dpt_suspend(struct drm_i915_private *i915);
void intel_dpt_resume(struct drm_i915_private *i915);
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb);
-void intel_dpt_configure(struct intel_crtc *crtc);
#endif /* __INTEL_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
new file mode 100644
index 0000000000..cdba47165c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include "i915_reg.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dpt_common.h"
+
+void intel_dpt_configure(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+
+ if (DISPLAY_VER(i915) == 14) {
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
+
+ intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
+ PLANE_CHICKEN_DISABLE_DPT,
+ i915->display.params.enable_dpt ? 0 :
+ PLANE_CHICKEN_DISABLE_DPT);
+ }
+ } else if (DISPLAY_VER(i915) == 13) {
+ intel_de_rmw(i915, CHICKEN_MISC_2,
+ CHICKEN_MISC_DISABLE_DPT,
+ i915->display.params.enable_dpt ? 0 :
+ CHICKEN_MISC_DISABLE_DPT);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.h b/drivers/gpu/drm/i915/display/intel_dpt_common.h
new file mode 100644
index 0000000000..6d7de40512
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt_common.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_DPT_COMMON_H__
+#define __INTEL_DPT_COMMON_H__
+
+struct intel_crtc;
+
+void intel_dpt_configure(struct intel_crtc *crtc);
+
+#endif /* __INTEL_DPT_COMMON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index 15c3dd5d38..54d9abeb2d 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -4,9 +4,6 @@
*
*/
-#include "gem/i915_gem_internal.h"
-#include "gem/i915_gem_lmem.h"
-
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
@@ -14,12 +11,13 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
+#include "intel_dsb_buffer.h"
#include "intel_dsb_regs.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_watermark.h"
-struct i915_vma;
+#define CACHELINE_BYTES 64
enum dsb_id {
INVALID_DSB = -1,
@@ -32,8 +30,7 @@ enum dsb_id {
struct intel_dsb {
enum dsb_id id;
- u32 *cmd_buf;
- struct i915_vma *vma;
+ struct intel_dsb_buffer dsb_buf;
struct intel_crtc *crtc;
/*
@@ -109,15 +106,17 @@ static void intel_dsb_dump(struct intel_dsb *dsb)
{
struct intel_crtc *crtc = dsb->crtc;
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- const u32 *buf = dsb->cmd_buf;
int i;
drm_dbg_kms(&i915->drm, "[CRTC:%d:%s] DSB %d commands {\n",
crtc->base.base.id, crtc->base.name, dsb->id);
for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
drm_dbg_kms(&i915->drm,
- " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
- i * 4, buf[i], buf[i+1], buf[i+2], buf[i+3]);
+ " 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
+ intel_dsb_buffer_read(&dsb->dsb_buf, i),
+ intel_dsb_buffer_read(&dsb->dsb_buf, i + 1),
+ intel_dsb_buffer_read(&dsb->dsb_buf, i + 2),
+ intel_dsb_buffer_read(&dsb->dsb_buf, i + 3));
drm_dbg_kms(&i915->drm, "}\n");
}
@@ -129,8 +128,6 @@ static bool is_dsb_busy(struct drm_i915_private *i915, enum pipe pipe,
static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
{
- u32 *buf = dsb->cmd_buf;
-
if (!assert_dsb_has_room(dsb))
return;
@@ -139,14 +136,13 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
dsb->ins_start_offset = dsb->free_pos;
- buf[dsb->free_pos++] = ldw;
- buf[dsb->free_pos++] = udw;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, ldw);
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, udw);
}
static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
u32 opcode, i915_reg_t reg)
{
- const u32 *buf = dsb->cmd_buf;
u32 prev_opcode, prev_reg;
/*
@@ -157,8 +153,10 @@ static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
if (dsb->free_pos == 0)
return false;
- prev_opcode = buf[dsb->ins_start_offset + 1] & ~DSB_REG_VALUE_MASK;
- prev_reg = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK;
+ prev_opcode = intel_dsb_buffer_read(&dsb->dsb_buf,
+ dsb->ins_start_offset + 1) & ~DSB_REG_VALUE_MASK;
+ prev_reg = intel_dsb_buffer_read(&dsb->dsb_buf,
+ dsb->ins_start_offset + 1) & DSB_REG_VALUE_MASK;
return prev_opcode == opcode && prev_reg == i915_mmio_reg_offset(reg);
}
@@ -191,6 +189,8 @@ static bool intel_dsb_prev_ins_is_indexed_write(struct intel_dsb *dsb, i915_reg_
void intel_dsb_reg_write(struct intel_dsb *dsb,
i915_reg_t reg, u32 val)
{
+ u32 old_val;
+
/*
* For example the buffer will look like below for 3 dwords for auto
* increment register:
@@ -214,31 +214,32 @@ void intel_dsb_reg_write(struct intel_dsb *dsb,
(DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) |
i915_mmio_reg_offset(reg));
} else {
- u32 *buf = dsb->cmd_buf;
-
if (!assert_dsb_has_room(dsb))
return;
/* convert to indexed write? */
if (intel_dsb_prev_ins_is_mmio_write(dsb, reg)) {
- u32 prev_val = buf[dsb->ins_start_offset + 0];
+ u32 prev_val = intel_dsb_buffer_read(&dsb->dsb_buf,
+ dsb->ins_start_offset + 0);
- buf[dsb->ins_start_offset + 0] = 1; /* count */
- buf[dsb->ins_start_offset + 1] =
- (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
- i915_mmio_reg_offset(reg);
- buf[dsb->ins_start_offset + 2] = prev_val;
+ intel_dsb_buffer_write(&dsb->dsb_buf,
+ dsb->ins_start_offset + 0, 1); /* count */
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 1,
+ (DSB_OPCODE_INDEXED_WRITE << DSB_OPCODE_SHIFT) |
+ i915_mmio_reg_offset(reg));
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset + 2, prev_val);
dsb->free_pos++;
}
- buf[dsb->free_pos++] = val;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos++, val);
/* Update the count */
- buf[dsb->ins_start_offset]++;
+ old_val = intel_dsb_buffer_read(&dsb->dsb_buf, dsb->ins_start_offset);
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->ins_start_offset, old_val + 1);
 		/* If the number of data words is odd, the last dword should be 0. */
if (dsb->free_pos & 0x1)
- buf[dsb->free_pos] = 0;
+ intel_dsb_buffer_write(&dsb->dsb_buf, dsb->free_pos, 0);
}
}
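
For reference, the in-buffer layout that the merge-into-indexed-write path above produces when three consecutive writes target the same auto-increment register, derived from the code rather than copied from the patch (offsets relative to ins_start_offset):

/* Layout after merging three writes to the same register (sketch):
 *
 *   [ins_start_offset + 0]  3                       count of data dwords
 *   [ins_start_offset + 1]  INDEXED_WRITE opcode | i915_mmio_reg_offset(reg)
 *   [ins_start_offset + 2]  val0
 *   [ins_start_offset + 3]  val1
 *   [ins_start_offset + 4]  val2
 *   [ins_start_offset + 5]  0                       pad, since the count is odd
 */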
@@ -297,8 +298,8 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
if (aligned_tail > tail)
- memset(&dsb->cmd_buf[dsb->free_pos], 0,
- aligned_tail - tail);
+ intel_dsb_buffer_memset(&dsb->dsb_buf, dsb->free_pos, 0,
+ aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
}
@@ -317,7 +318,7 @@ void intel_dsb_finish(struct intel_dsb *dsb)
intel_dsb_align_tail(dsb);
- i915_gem_object_flush_map(dsb->vma->obj);
+ intel_dsb_buffer_flush_map(&dsb->dsb_buf);
}
static int intel_dsb_dewake_scanline(const struct intel_crtc_state *crtc_state)
@@ -375,7 +376,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
dsb_chicken(crtc));
intel_de_write_fw(dev_priv, DSB_HEAD(pipe, dsb->id),
- i915_ggtt_offset(dsb->vma));
+ intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf));
if (dewake_scanline >= 0) {
int diff, hw_dewake_scanline;
@@ -397,7 +398,7 @@ static void _intel_dsb_commit(struct intel_dsb *dsb, u32 ctrl,
}
intel_de_write_fw(dev_priv, DSB_TAIL(pipe, dsb->id),
- i915_ggtt_offset(dsb->vma) + tail);
+ intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf) + tail);
}
/**
@@ -422,7 +423,7 @@ void intel_dsb_wait(struct intel_dsb *dsb)
enum pipe pipe = crtc->pipe;
if (wait_for(!is_dsb_busy(dev_priv, pipe, dsb->id), 1)) {
- u32 offset = i915_ggtt_offset(dsb->vma);
+ u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf);
intel_de_write_fw(dev_priv, DSB_CTRL(pipe, dsb->id),
DSB_ENABLE | DSB_HALT);
@@ -459,12 +460,9 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref;
struct intel_dsb *dsb;
- struct i915_vma *vma;
unsigned int size;
- u32 *buf;
if (!HAS_DSB(i915))
return NULL;
@@ -478,37 +476,13 @@ struct intel_dsb *intel_dsb_prepare(const struct intel_crtc_state *crtc_state,
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
- if (HAS_LMEM(i915)) {
- obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
- I915_BO_ALLOC_CONTIGUOUS);
- if (IS_ERR(obj))
- goto out_put_rpm;
- } else {
- obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
- if (IS_ERR(obj))
- goto out_put_rpm;
-
- i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
- }
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
- if (IS_ERR(vma)) {
- i915_gem_object_put(obj);
- goto out_put_rpm;
- }
-
- buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
- if (IS_ERR(buf)) {
- i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+ if (!intel_dsb_buffer_create(crtc, &dsb->dsb_buf, size))
goto out_put_rpm;
- }
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
dsb->id = DSB1;
- dsb->vma = vma;
dsb->crtc = crtc;
- dsb->cmd_buf = buf;
dsb->size = size / 4; /* in dwords */
dsb->free_pos = 0;
dsb->ins_start_offset = 0;
@@ -536,6 +510,6 @@ out:
*/
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
- i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP);
+ intel_dsb_buffer_cleanup(&dsb->dsb_buf);
kfree(dsb);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.c b/drivers/gpu/drm/i915/display/intel_dsb_buffer.c
new file mode 100644
index 0000000000..c77d48bda2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb_buffer.c
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2023, Intel Corporation.
+ */
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_display_types.h"
+#include "intel_dsb_buffer.h"
+
+u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
+{
+ return i915_ggtt_offset(dsb_buf->vma);
+}
+
+void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
+{
+ dsb_buf->cmd_buf[idx] = val;
+}
+
+u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+{
+ return dsb_buf->cmd_buf[idx];
+}
+
+void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
+{
+ WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
+
+ memset(&dsb_buf->cmd_buf[idx], val, size);
+}
+
+bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *buf;
+
+ if (HAS_LMEM(i915)) {
+ obj = i915_gem_object_create_lmem(i915, PAGE_ALIGN(size),
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return false;
+ } else {
+ obj = i915_gem_object_create_internal(i915, PAGE_ALIGN(size));
+ if (IS_ERR(obj))
+ return false;
+
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
+ }
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return false;
+ }
+
+ buf = i915_gem_object_pin_map_unlocked(vma->obj, I915_MAP_WC);
+ if (IS_ERR(buf)) {
+ i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
+ return false;
+ }
+
+ dsb_buf->vma = vma;
+ dsb_buf->cmd_buf = buf;
+ dsb_buf->buf_size = size;
+
+ return true;
+}
+
+void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
+{
+ i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP);
+}
+
+void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
+{
+ i915_gem_object_flush_map(dsb_buf->vma->obj);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.h b/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
new file mode 100644
index 0000000000..425acd3939
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _INTEL_DSB_BUFFER_H
+#define _INTEL_DSB_BUFFER_H
+
+#include <linux/types.h>
+
+struct intel_crtc;
+struct i915_vma;
+
+struct intel_dsb_buffer {
+ u32 *cmd_buf;
+ struct i915_vma *vma;
+ size_t buf_size;
+};
+
+u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf);
+void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
+u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx);
+void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
+bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf,
+ size_t size);
+void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf);
+void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf);
+
+#endif
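
Taken together with the intel_dsb.c changes above, the buffer helpers are used roughly as below; a condensed, illustrative lifecycle with error handling and real sizing trimmed:

/* Illustrative lifecycle of the new intel_dsb_buffer API (sketch only). */
static bool example_dsb_buffer_roundtrip(struct intel_crtc *crtc)
{
	struct intel_dsb_buffer buf;
	u32 val;

	if (!intel_dsb_buffer_create(crtc, &buf, PAGE_SIZE))
		return false;

	intel_dsb_buffer_write(&buf, 0, 0xdeadbeef);	/* append a dword */
	val = intel_dsb_buffer_read(&buf, 0);		/* read it back */
	intel_dsb_buffer_flush_map(&buf);		/* flush before the HW fetches it */

	/* DSB_HEAD/DSB_TAIL are programmed with this GGTT offset. */
	(void)intel_dsb_buffer_ggtt_offset(&buf);
	(void)val;

	intel_dsb_buffer_cleanup(&buf);
	return true;
}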
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 24b2cbcfc1..a5d7fc8418 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -55,43 +55,6 @@
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
#define MIPI_PORT_SHIFT 3
-/* base offsets for gpio pads */
-#define VLV_GPIO_NC_0_HV_DDI0_HPD 0x4130
-#define VLV_GPIO_NC_1_HV_DDI0_DDC_SDA 0x4120
-#define VLV_GPIO_NC_2_HV_DDI0_DDC_SCL 0x4110
-#define VLV_GPIO_NC_3_PANEL0_VDDEN 0x4140
-#define VLV_GPIO_NC_4_PANEL0_BKLTEN 0x4150
-#define VLV_GPIO_NC_5_PANEL0_BKLTCTL 0x4160
-#define VLV_GPIO_NC_6_HV_DDI1_HPD 0x4180
-#define VLV_GPIO_NC_7_HV_DDI1_DDC_SDA 0x4190
-#define VLV_GPIO_NC_8_HV_DDI1_DDC_SCL 0x4170
-#define VLV_GPIO_NC_9_PANEL1_VDDEN 0x4100
-#define VLV_GPIO_NC_10_PANEL1_BKLTEN 0x40E0
-#define VLV_GPIO_NC_11_PANEL1_BKLTCTL 0x40F0
-
-#define VLV_GPIO_PCONF0(base_offset) (base_offset)
-#define VLV_GPIO_PAD_VAL(base_offset) ((base_offset) + 8)
-
-struct gpio_map {
- u16 base_offset;
- bool init;
-};
-
-static struct gpio_map vlv_gpio_table[] = {
- { VLV_GPIO_NC_0_HV_DDI0_HPD },
- { VLV_GPIO_NC_1_HV_DDI0_DDC_SDA },
- { VLV_GPIO_NC_2_HV_DDI0_DDC_SCL },
- { VLV_GPIO_NC_3_PANEL0_VDDEN },
- { VLV_GPIO_NC_4_PANEL0_BKLTEN },
- { VLV_GPIO_NC_5_PANEL0_BKLTCTL },
- { VLV_GPIO_NC_6_HV_DDI1_HPD },
- { VLV_GPIO_NC_7_HV_DDI1_DDC_SDA },
- { VLV_GPIO_NC_8_HV_DDI1_DDC_SCL },
- { VLV_GPIO_NC_9_PANEL1_VDDEN },
- { VLV_GPIO_NC_10_PANEL1_BKLTEN },
- { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
-};
-
struct i2c_adapter_lookup {
u16 slave_addr;
struct intel_dsi *intel_dsi;
@@ -103,19 +66,6 @@ struct i2c_adapter_lookup {
#define CHV_GPIO_IDX_START_SW 100
#define CHV_GPIO_IDX_START_SE 198
-#define CHV_VBT_MAX_PINS_PER_FMLY 15
-
-#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
-#define CHV_GPIO_GPIOEN (1 << 15)
-#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
-#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
-#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
-#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
-#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
-
-#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
-#define CHV_GPIO_CFGLOCK (1 << 31)
-
/* ICL DSI Display GPIO Pins */
#define ICL_GPIO_DDSP_HPD_A 0
#define ICL_GPIO_L_VDDEN_1 1
@@ -142,7 +92,7 @@ static enum port intel_dsi_seq_port_to_port(struct intel_dsi *intel_dsi,
if (seq_port) {
if (intel_dsi->ports & BIT(PORT_B))
return PORT_B;
- else if (intel_dsi->ports & BIT(PORT_C))
+ if (intel_dsi->ports & BIT(PORT_C))
return PORT_C;
}
@@ -243,75 +193,93 @@ static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
-static void vlv_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
+static void soc_gpio_set_value(struct intel_connector *connector, u8 gpio_index,
+ const char *con_id, u8 idx, bool value)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct gpio_map *map;
- u16 pconf0, padval;
- u32 tmp;
- u8 port;
-
- if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
- drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n",
- gpio_index);
- return;
+ /* XXX: this table is a quick ugly hack. */
+ static struct gpio_desc *soc_gpio_table[U8_MAX + 1];
+ struct gpio_desc *gpio_desc = soc_gpio_table[gpio_index];
+
+ if (gpio_desc) {
+ gpiod_set_value(gpio_desc, value);
+ } else {
+ gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev, con_id, idx,
+ value ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW);
+ if (IS_ERR(gpio_desc)) {
+ drm_err(&dev_priv->drm,
+ "GPIO index %u request failed (%pe)\n",
+ gpio_index, gpio_desc);
+ return;
+ }
+
+ soc_gpio_table[gpio_index] = gpio_desc;
}
+}
- map = &vlv_gpio_table[gpio_index];
+static void soc_opaque_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_index, const char *chip,
+ const char *con_id, u8 idx, bool value)
+{
+ struct gpiod_lookup_table *lookup;
- if (connector->panel.vbt.dsi.seq_version >= 3) {
- /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
- port = IOSF_PORT_GPIO_NC;
- } else {
- if (gpio_source == 0) {
- port = IOSF_PORT_GPIO_NC;
- } else if (gpio_source == 1) {
+ lookup = kzalloc(struct_size(lookup, table, 2), GFP_KERNEL);
+ if (!lookup)
+ return;
+
+ lookup->dev_id = "0000:00:02.0";
+ lookup->table[0] =
+ GPIO_LOOKUP_IDX(chip, idx, con_id, idx, GPIO_ACTIVE_HIGH);
+
+ gpiod_add_lookup_table(lookup);
+
+ soc_gpio_set_value(connector, gpio_index, con_id, idx, value);
+
+ gpiod_remove_lookup_table(lookup);
+ kfree(lookup);
+}
+
+static void vlv_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
+ if (connector->panel.vbt.dsi.seq_version < 3) {
+ if (gpio_source == 1) {
drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
return;
- } else {
+ }
+ if (gpio_source > 1) {
drm_dbg_kms(&dev_priv->drm,
"unknown gpio source %u\n", gpio_source);
return;
}
}
- pconf0 = VLV_GPIO_PCONF0(map->base_offset);
- padval = VLV_GPIO_PAD_VAL(map->base_offset);
-
- vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
- if (!map->init) {
- /* FIXME: remove constant below */
- vlv_iosf_sb_write(dev_priv, port, pconf0, 0x2000CC00);
- map->init = true;
- }
-
- tmp = 0x4 | value;
- vlv_iosf_sb_write(dev_priv, port, padval, tmp);
- vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
+ soc_opaque_gpio_set_value(connector, gpio_index,
+ "INT33FC:01", "Panel N", gpio_index, value);
}
-static void chv_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
+static void chv_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_source, u8 gpio_index, bool value)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u16 cfg0, cfg1;
- u16 family_num;
- u8 port;
if (connector->panel.vbt.dsi.seq_version >= 3) {
if (gpio_index >= CHV_GPIO_IDX_START_SE) {
/* XXX: it's unclear whether 255->57 is part of SE. */
- gpio_index -= CHV_GPIO_IDX_START_SE;
- port = CHV_IOSF_PORT_GPIO_SE;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:03", "Panel SE",
+ gpio_index - CHV_GPIO_IDX_START_SE, value);
} else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
- gpio_index -= CHV_GPIO_IDX_START_SW;
- port = CHV_IOSF_PORT_GPIO_SW;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:00", "Panel SW",
+ gpio_index - CHV_GPIO_IDX_START_SW, value);
} else if (gpio_index >= CHV_GPIO_IDX_START_E) {
- gpio_index -= CHV_GPIO_IDX_START_E;
- port = CHV_IOSF_PORT_GPIO_E;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:02", "Panel E",
+ gpio_index - CHV_GPIO_IDX_START_E, value);
} else {
- port = CHV_IOSF_PORT_GPIO_N;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N",
+ gpio_index - CHV_GPIO_IDX_START_N, value);
}
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
@@ -328,56 +296,15 @@ static void chv_exec_gpio(struct intel_connector *connector,
return;
}
- port = CHV_IOSF_PORT_GPIO_N;
- }
-
- family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
- gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
-
- cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
- cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
-
- vlv_iosf_sb_get(dev_priv, BIT(VLV_IOSF_SB_GPIO));
- vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
- vlv_iosf_sb_write(dev_priv, port, cfg0,
- CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
- CHV_GPIO_GPIOTXSTATE(value));
- vlv_iosf_sb_put(dev_priv, BIT(VLV_IOSF_SB_GPIO));
-}
-
-static void bxt_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- /* XXX: this table is a quick ugly hack. */
- static struct gpio_desc *bxt_gpio_table[U8_MAX + 1];
- struct gpio_desc *gpio_desc = bxt_gpio_table[gpio_index];
-
- if (!gpio_desc) {
- gpio_desc = devm_gpiod_get_index(dev_priv->drm.dev,
- NULL, gpio_index,
- value ? GPIOD_OUT_LOW :
- GPIOD_OUT_HIGH);
-
- if (IS_ERR_OR_NULL(gpio_desc)) {
- drm_err(&dev_priv->drm,
- "GPIO index %u request failed (%ld)\n",
- gpio_index, PTR_ERR(gpio_desc));
- return;
- }
-
- bxt_gpio_table[gpio_index] = gpio_desc;
+ soc_opaque_gpio_set_value(connector, gpio_index, "INT33FF:01", "Panel N",
+ gpio_index - CHV_GPIO_IDX_START_N, value);
}
-
- gpiod_set_value(gpio_desc, value);
}
-static void icl_exec_gpio(struct intel_connector *connector,
- u8 gpio_source, u8 gpio_index, bool value)
+static void bxt_gpio_set_value(struct intel_connector *connector,
+ u8 gpio_index, bool value)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
+ soc_gpio_set_value(connector, gpio_index, NULL, gpio_index, value);
}
enum {
@@ -462,44 +389,45 @@ static void icl_native_gpio_set_value(struct drm_i915_private *dev_priv,
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
{
struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *i915 = to_i915(dev);
struct intel_connector *connector = intel_dsi->attached_connector;
- u8 gpio_source, gpio_index = 0, gpio_number;
+ u8 gpio_source = 0, gpio_index = 0, gpio_number;
bool value;
- bool native = DISPLAY_VER(dev_priv) >= 11;
+ int size;
+ bool native = DISPLAY_VER(i915) >= 11;
- if (connector->panel.vbt.dsi.seq_version >= 3)
- gpio_index = *data++;
+ if (connector->panel.vbt.dsi.seq_version >= 3) {
+ size = 3;
- gpio_number = *data++;
+ gpio_index = data[0];
+ gpio_number = data[1];
+ value = data[2] & BIT(0);
- /* gpio source in sequence v2 only */
- if (connector->panel.vbt.dsi.seq_version == 2)
- gpio_source = (*data >> 1) & 3;
- else
- gpio_source = 0;
+ if (connector->panel.vbt.dsi.seq_version >= 4 && data[2] & BIT(1))
+ native = false;
+ } else {
+ size = 2;
- if (connector->panel.vbt.dsi.seq_version >= 4 && *data & BIT(1))
- native = false;
+ gpio_number = data[0];
+ value = data[1] & BIT(0);
- /* pull up/down */
- value = *data++ & 1;
+ if (connector->panel.vbt.dsi.seq_version == 2)
+ gpio_source = (data[1] >> 1) & 3;
+ }
- drm_dbg_kms(&dev_priv->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
+ drm_dbg_kms(&i915->drm, "GPIO index %u, number %u, source %u, native %s, set to %s\n",
gpio_index, gpio_number, gpio_source, str_yes_no(native), str_on_off(value));
if (native)
- icl_native_gpio_set_value(dev_priv, gpio_number, value);
- else if (DISPLAY_VER(dev_priv) >= 11)
- icl_exec_gpio(connector, gpio_source, gpio_index, value);
- else if (IS_VALLEYVIEW(dev_priv))
- vlv_exec_gpio(connector, gpio_source, gpio_number, value);
- else if (IS_CHERRYVIEW(dev_priv))
- chv_exec_gpio(connector, gpio_source, gpio_number, value);
- else
- bxt_exec_gpio(connector, gpio_source, gpio_index, value);
-
- return data;
+ icl_native_gpio_set_value(i915, gpio_number, value);
+ else if (DISPLAY_VER(i915) >= 9)
+ bxt_gpio_set_value(connector, gpio_index, value);
+ else if (IS_VALLEYVIEW(i915))
+ vlv_gpio_set_value(connector, gpio_source, gpio_number, value);
+ else if (IS_CHERRYVIEW(i915))
+ chv_gpio_set_value(connector, gpio_source, gpio_number, value);
+
+ return data + size;
}
#ifdef CONFIG_ACPI
@@ -658,6 +586,7 @@ static const fn_mipi_elem_exec exec_elem[] = {
*/
static const char * const seq_name[] = {
+ [MIPI_SEQ_END] = "MIPI_SEQ_END",
[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
@@ -673,10 +602,10 @@ static const char * const seq_name[] = {
static const char *sequence_name(enum mipi_seq seq_id)
{
- if (seq_id < ARRAY_SIZE(seq_name) && seq_name[seq_id])
+ if (seq_id < ARRAY_SIZE(seq_name))
return seq_name[seq_id];
- else
- return "(unknown)";
+
+ return "(unknown)";
}
static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
@@ -707,13 +636,10 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
if (connector->panel.vbt.dsi.seq_version >= 3)
data += 4;
- while (1) {
+ while (*data != MIPI_SEQ_ELEM_END) {
u8 operation_byte = *data++;
u8 operation_size = 0;
- if (operation_byte == MIPI_SEQ_ELEM_END)
- break;
-
if (operation_byte < ARRAY_SIZE(exec_elem))
mipi_elem_exec = exec_elem[operation_byte];
else
@@ -873,36 +799,34 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
* multiply by 100 to preserve remainder
*/
if (intel_dsi->video_mode == BURST_MODE) {
- if (mipi_config->target_burst_mode_freq) {
- u32 bitrate = intel_dsi_bitrate(intel_dsi);
-
- /*
- * Sometimes the VBT contains a slightly lower clock,
- * then the bitrate we have calculated, in this case
- * just replace it with the calculated bitrate.
- */
- if (mipi_config->target_burst_mode_freq < bitrate &&
- intel_fuzzy_clock_check(
- mipi_config->target_burst_mode_freq,
- bitrate))
- mipi_config->target_burst_mode_freq = bitrate;
-
- if (mipi_config->target_burst_mode_freq < bitrate) {
- drm_err(&dev_priv->drm,
- "Burst mode freq is less than computed\n");
- return false;
- }
+ u32 bitrate;
- burst_mode_ratio = DIV_ROUND_UP(
- mipi_config->target_burst_mode_freq * 100,
- bitrate);
+ if (mipi_config->target_burst_mode_freq == 0) {
+ drm_err(&dev_priv->drm, "Burst mode target is not set\n");
+ return false;
+ }
- intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
- } else {
- drm_err(&dev_priv->drm,
- "Burst mode target is not set\n");
+ bitrate = intel_dsi_bitrate(intel_dsi);
+
+ /*
+ * Sometimes the VBT contains a slightly lower clock than
+ * the bitrate we have calculated; in this case just replace
+ * it with the calculated bitrate.
+ */
+ if (mipi_config->target_burst_mode_freq < bitrate &&
+ intel_fuzzy_clock_check(mipi_config->target_burst_mode_freq,
+ bitrate))
+ mipi_config->target_burst_mode_freq = bitrate;
+
+ if (mipi_config->target_burst_mode_freq < bitrate) {
+ drm_err(&dev_priv->drm, "Burst mode freq is less than computed\n");
return false;
}
+
+ burst_mode_ratio =
+ DIV_ROUND_UP(mipi_config->target_burst_mode_freq * 100, bitrate);
+
+ intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
} else
burst_mode_ratio = 100;
@@ -964,6 +888,7 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
struct intel_connector *connector = intel_dsi->attached_connector;
struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
enum gpiod_flags flags = panel_is_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ struct gpiod_lookup_table *gpiod_lookup_table = NULL;
bool want_backlight_gpio = false;
bool want_panel_gpio = false;
struct pinctrl *pinctrl;
@@ -971,12 +896,12 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
mipi_config->pwm_blc == PPS_BLC_PMIC) {
- gpiod_add_lookup_table(&pmic_panel_gpio_table);
+ gpiod_lookup_table = &pmic_panel_gpio_table;
want_panel_gpio = true;
}
if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
- gpiod_add_lookup_table(&soc_panel_gpio_table);
+ gpiod_lookup_table = &soc_panel_gpio_table;
want_panel_gpio = true;
want_backlight_gpio = true;
@@ -993,8 +918,11 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
"Failed to set pinmux to PWM\n");
}
+ if (gpiod_lookup_table)
+ gpiod_add_lookup_table(gpiod_lookup_table);
+
if (want_panel_gpio) {
- intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
+ intel_dsi->gpio_panel = devm_gpiod_get(dev->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
drm_err(&dev_priv->drm,
"Failed to own gpio for panel control\n");
@@ -1004,38 +932,14 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
if (want_backlight_gpio) {
intel_dsi->gpio_backlight =
- gpiod_get(dev->dev, "backlight", flags);
+ devm_gpiod_get(dev->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
drm_err(&dev_priv->drm,
"Failed to own gpio for backlight control\n");
intel_dsi->gpio_backlight = NULL;
}
}
-}
-void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi)
-{
- struct drm_device *dev = intel_dsi->base.base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_connector *connector = intel_dsi->attached_connector;
- struct mipi_config *mipi_config = connector->panel.vbt.dsi.config;
-
- if (intel_dsi->gpio_panel) {
- gpiod_put(intel_dsi->gpio_panel);
- intel_dsi->gpio_panel = NULL;
- }
-
- if (intel_dsi->gpio_backlight) {
- gpiod_put(intel_dsi->gpio_backlight);
- intel_dsi->gpio_backlight = NULL;
- }
-
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- mipi_config->pwm_blc == PPS_BLC_PMIC)
- gpiod_remove_lookup_table(&pmic_panel_gpio_table);
-
- if (IS_VALLEYVIEW(dev_priv) && mipi_config->pwm_blc == PPS_BLC_SOC) {
- pinctrl_unregister_mappings(soc_pwm_pinctrl_map);
- gpiod_remove_lookup_table(&soc_panel_gpio_table);
- }
+ if (gpiod_lookup_table)
+ gpiod_remove_lookup_table(gpiod_lookup_table);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
index 468d873fab..3462fcc760 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.h
@@ -13,7 +13,6 @@ struct intel_dsi;
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
-void intel_dsi_vbt_gpio_cleanup(struct intel_dsi *intel_dsi);
void intel_dsi_vbt_exec_sequence(struct intel_dsi *intel_dsi,
enum mipi_seq seq_id);
void intel_dsi_log_params(struct intel_dsi *intel_dsi);
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 646f367a13..0c0144eaa8 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -4,7 +4,6 @@
*/
#include <drm/drm_blend.h>
-#include <drm/drm_framebuffer.h>
#include <drm/drm_modeset_helper.h>
#include <linux/dma-fence.h>
@@ -15,6 +14,7 @@
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
+#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
#define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
@@ -301,6 +301,33 @@ lookup_format_info(const struct drm_format_info formats[],
return NULL;
}
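+/* Map a framebuffer modifier to the legacy i915 tiling mode; unknown modifiers map to I915_TILING_NONE */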
+unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
+{
+ const struct intel_modifier_desc *md;
+ u8 tiling_caps;
+
+ md = lookup_modifier_or_null(fb_modifier);
+ if (!md)
+ return I915_TILING_NONE;
+
+ tiling_caps = lookup_modifier_or_null(fb_modifier)->plane_caps &
+ INTEL_PLANE_CAP_TILING_MASK;
+
+ switch (tiling_caps) {
+ case INTEL_PLANE_CAP_TILING_Y:
+ return I915_TILING_Y;
+ case INTEL_PLANE_CAP_TILING_X:
+ return I915_TILING_X;
+ case INTEL_PLANE_CAP_TILING_4:
+ case INTEL_PLANE_CAP_TILING_Yf:
+ case INTEL_PLANE_CAP_TILING_NONE:
+ return I915_TILING_NONE;
+ default:
+ MISSING_CASE(tiling_caps);
+ return I915_TILING_NONE;
+ }
+}
+
/**
* intel_fb_get_format_info: Get a modifier specific format information
* @cmd: FB add command structure
@@ -737,26 +764,6 @@ intel_fb_align_height(const struct drm_framebuffer *fb,
return ALIGN(height, tile_height);
}
-static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
-{
- u8 tiling_caps = lookup_modifier(fb_modifier)->plane_caps &
- INTEL_PLANE_CAP_TILING_MASK;
-
- switch (tiling_caps) {
- case INTEL_PLANE_CAP_TILING_Y:
- return I915_TILING_Y;
- case INTEL_PLANE_CAP_TILING_X:
- return I915_TILING_X;
- case INTEL_PLANE_CAP_TILING_4:
- case INTEL_PLANE_CAP_TILING_Yf:
- case INTEL_PLANE_CAP_TILING_NONE:
- return I915_TILING_NONE;
- default:
- MISSING_CASE(tiling_caps);
- return I915_TILING_NONE;
- }
-}
-
bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
{
return HAS_DPT(i915) && modifier != DRM_FORMAT_MOD_LINEAR;
@@ -764,7 +771,7 @@ bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier)
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb)
{
- return fb && to_i915(fb->dev)->params.enable_dpt &&
+ return to_i915(fb->dev)->display.params.enable_dpt &&
intel_fb_modifier_uses_dpt(to_i915(fb->dev), fb->modifier);
}
@@ -1670,10 +1677,10 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
max_size = max(max_size, offset + size);
}
- if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
+ if (mul_u32_u32(max_size, tile_size) > intel_bo_to_drm_bo(obj)->size) {
drm_dbg_kms(&i915->drm,
"fb too big for bo (need %llu bytes, have %zu bytes)\n",
- mul_u32_u32(max_size, tile_size), obj->base.size);
+ mul_u32_u32(max_size, tile_size), intel_bo_to_drm_bo(obj)->size);
return -EINVAL;
}
@@ -1894,6 +1901,8 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intel_frontbuffer_put(intel_fb->frontbuffer);
+ intel_fb_bo_framebuffer_fini(intel_fb_obj(fb));
+
kfree(intel_fb);
}
@@ -1902,7 +1911,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
unsigned int *handle)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct drm_i915_private *i915 = to_i915(intel_bo_to_drm_bo(obj)->dev);
if (i915_gem_object_is_userptr(obj)) {
drm_dbg(&i915->drm,
@@ -1910,7 +1919,7 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
return -EINVAL;
}
- return drm_gem_handle_create(file, &obj->base, handle);
+ return drm_gem_handle_create(file, intel_bo_to_drm_bo(obj), handle);
}
struct frontbuffer_fence_cb {
@@ -1943,10 +1952,10 @@ static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
if (!atomic_read(&front->bits))
return 0;
- if (dma_resv_test_signaled(obj->base.resv, dma_resv_usage_rw(false)))
+ if (dma_resv_test_signaled(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false)))
goto flush;
- ret = dma_resv_get_singleton(obj->base.resv, dma_resv_usage_rw(false),
+ ret = dma_resv_get_singleton(intel_bo_to_drm_bo(obj)->resv, dma_resv_usage_rw(false),
&fence);
if (ret || !fence)
goto flush;
@@ -1988,61 +1997,30 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_i915_private *dev_priv = to_i915(intel_bo_to_drm_bo(obj)->dev);
struct drm_framebuffer *fb = &intel_fb->base;
u32 max_stride;
- unsigned int tiling, stride;
int ret = -EINVAL;
int i;
- intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
-
- i915_gem_object_lock(obj, NULL);
- tiling = i915_gem_object_get_tiling(obj);
- stride = i915_gem_object_get_stride(obj);
- i915_gem_object_unlock(obj);
+ ret = intel_fb_bo_framebuffer_init(intel_fb, obj, mode_cmd);
+ if (ret)
+ return ret;
- if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
- /*
- * If there's a fence, enforce that
- * the fb modifier and tiling mode match.
- */
- if (tiling != I915_TILING_NONE &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode doesn't match fb modifier\n");
- goto err;
- }
- } else {
- if (tiling == I915_TILING_X) {
- mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
- } else if (tiling == I915_TILING_Y) {
- drm_dbg_kms(&dev_priv->drm,
- "No Y tiling for legacy addfb\n");
- goto err;
- }
+ intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ if (!intel_fb->frontbuffer) {
+ ret = -ENOMEM;
+ goto err;
}
+ ret = -EINVAL;
if (!drm_any_plane_has_format(&dev_priv->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
drm_dbg_kms(&dev_priv->drm,
"unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
- goto err;
- }
-
- /*
- * gen2/3 display engine uses the fence if present,
- * so the tiling mode must match the fb modifier exactly.
- */
- if (DISPLAY_VER(dev_priv) < 4 &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode must match fb modifier exactly on gen2/3\n");
- goto err;
+ goto err_frontbuffer_put;
}
max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
@@ -2053,18 +2031,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
"tiled" : "linear",
mode_cmd->pitches[0], max_stride);
- goto err;
- }
-
- /*
- * If there's a fence, enforce that
- * the fb pitch and fence stride match.
- */
- if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- drm_dbg_kms(&dev_priv->drm,
- "pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
- goto err;
+ goto err_frontbuffer_put;
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
@@ -2072,7 +2039,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(&dev_priv->drm,
"plane 0 offset (0x%08x) must be 0\n",
mode_cmd->offsets[0]);
- goto err;
+ goto err_frontbuffer_put;
}
drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
@@ -2083,7 +2050,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
i);
- goto err;
+ goto err_frontbuffer_put;
}
stride_alignment = intel_fb_stride_alignment(fb, i);
@@ -2091,7 +2058,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
drm_dbg_kms(&dev_priv->drm,
"plane %d pitch (%d) must be at least %u byte aligned\n",
i, fb->pitches[i], stride_alignment);
- goto err;
+ goto err_frontbuffer_put;
}
if (intel_fb_is_gen12_ccs_aux_plane(fb, i)) {
@@ -2102,16 +2069,16 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
"ccs aux plane %d pitch (%d) must be %d\n",
i,
fb->pitches[i], ccs_aux_stride);
- goto err;
+ goto err_frontbuffer_put;
}
}
- fb->obj[i] = &obj->base;
+ fb->obj[i] = intel_bo_to_drm_bo(obj);
}
ret = intel_fill_fb_info(dev_priv, intel_fb);
if (ret)
- goto err;
+ goto err_frontbuffer_put;
if (intel_fb_uses_dpt(fb)) {
struct i915_address_space *vm;
@@ -2120,7 +2087,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (IS_ERR(vm)) {
drm_dbg_kms(&dev_priv->drm, "failed to create DPT\n");
ret = PTR_ERR(vm);
- goto err;
+ goto err_frontbuffer_put;
}
intel_fb->dpt_vm = vm;
@@ -2137,8 +2104,10 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
err_free_dpt:
if (intel_fb_uses_dpt(fb))
intel_dpt_destroy(intel_fb->dpt_vm);
-err:
+err_frontbuffer_put:
intel_frontbuffer_put(intel_fb->frontbuffer);
+err:
+ intel_fb_bo_framebuffer_fini(obj);
return ret;
}
@@ -2150,23 +2119,14 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_framebuffer *fb;
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- struct drm_i915_private *i915;
-
- obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- /* object is backed with LMEM for discrete */
- i915 = to_i915(obj->base.dev);
- if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
- /* object is "remote", not in local memory */
- i915_gem_object_put(obj);
- drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
- return ERR_PTR(-EREMOTE);
- }
+ struct drm_i915_private *i915 = to_i915(dev);
+
+ obj = intel_fb_bo_lookup_valid_bo(i915, filp, &mode_cmd);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
fb = intel_framebuffer_create(obj, &mode_cmd);
- i915_gem_object_put(obj);
+ drm_gem_object_put(intel_bo_to_drm_bo(obj));
return fb;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index e85167d6bc..23db6628f5 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -95,4 +95,6 @@ intel_user_framebuffer_create(struct drm_device *dev,
bool intel_fb_modifier_uses_dpt(struct drm_i915_private *i915, u64 modifier);
bool intel_fb_uses_dpt(const struct drm_framebuffer *fb);
+unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier);
+
#endif /* __INTEL_FB_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
new file mode 100644
index 0000000000..4be09541e5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.c
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_framebuffer.h>
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_fb.h"
+#include "intel_fb_bo.h"
+
+void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj)
+{
+ /* Nothing to do for i915 */
+}
+
+int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int tiling, stride;
+
+ i915_gem_object_lock(obj, NULL);
+ tiling = i915_gem_object_get_tiling(obj);
+ stride = i915_gem_object_get_stride(obj);
+ i915_gem_object_unlock(obj);
+
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /*
+ * If there's a fence, enforce that
+ * the fb modifier and tiling mode match.
+ */
+ if (tiling != I915_TILING_NONE &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&i915->drm,
+ "tiling_mode doesn't match fb modifier\n");
+ return -EINVAL;
+ }
+ } else {
+ if (tiling == I915_TILING_X) {
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ } else if (tiling == I915_TILING_Y) {
+ drm_dbg_kms(&i915->drm,
+ "No Y tiling for legacy addfb\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * gen2/3 display engine uses the fence if present,
+ * so the tiling mode must match the fb modifier exactly.
+ */
+ if (DISPLAY_VER(i915) < 4 &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&i915->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If there's a fence, enforce that
+ * the fb pitch and fence stride match.
+ */
+ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+ drm_dbg_kms(&i915->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct drm_i915_gem_object *
+intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ /* object is backed with LMEM for discrete */
+ if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
+ /* object is "remote", not in local memory */
+ i915_gem_object_put(obj);
+ drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
+ return ERR_PTR(-EREMOTE);
+ }
+
+ return obj;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
new file mode 100644
index 0000000000..232bf898b0
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_bo.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_BO_H__
+#define __INTEL_FB_BO_H__
+
+struct drm_file;
+struct drm_mode_fb_cmd2;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct intel_framebuffer;
+
+void intel_fb_bo_framebuffer_fini(struct drm_i915_gem_object *obj);
+
+int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+
+struct drm_i915_gem_object *
+intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 7b42aef37d..b6df9baf48 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -255,6 +255,16 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
return PTR_ERR(vma);
plane_state->ggtt_vma = vma;
+
+ /*
+ * Pre-populate the dma address before we enter the vblank
+ * evade critical section as i915_gem_object_get_dma_address()
+ * will trigger might_sleep() even if it won't actually sleep,
+ * which is the case when the fb has already been pinned.
+ */
+ if (phys_cursor)
+ plane_state->phys_dma_addr =
+ i915_gem_object_get_dma_address(intel_fb_obj(fb), 0);
} else {
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 4820d21cc9..f17a1afb49 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -608,6 +608,7 @@ static u32 ivb_dpfc_ctl(struct intel_fbc *fbc)
static void ivb_fbc_activate(struct intel_fbc *fbc)
{
struct drm_i915_private *i915 = fbc->i915;
+ u32 dpfc_ctl;
if (DISPLAY_VER(i915) >= 10)
glk_fbc_program_cfb_stride(fbc);
@@ -617,8 +618,13 @@ static void ivb_fbc_activate(struct intel_fbc *fbc)
if (intel_gt_support_legacy_fencing(to_gt(i915)))
snb_fbc_program_fence(fbc);
+ /* Wa_14019417088 Alternative WA */
+ dpfc_ctl = ivb_dpfc_ctl(fbc);
+ if (DISPLAY_VER(i915) >= 20)
+ intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl);
+
intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id),
- DPFC_CTL_EN | ivb_dpfc_ctl(fbc));
+ DPFC_CTL_EN | dpfc_ctl);
}
static bool ivb_fbc_is_compressing(struct intel_fbc *fbc)
@@ -1022,10 +1028,13 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
unsigned int effective_w, effective_h, max_w, max_h;
- if (DISPLAY_VER(i915) >= 10) {
+ if (DISPLAY_VER(i915) >= 11) {
+ max_w = 8192;
+ max_h = 4096;
+ } else if (DISPLAY_VER(i915) >= 10) {
max_w = 5120;
max_h = 4096;
- } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
+ } else if (DISPLAY_VER(i915) >= 7) {
max_w = 4096;
max_h = 4096;
} else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
@@ -1044,6 +1053,31 @@ static bool intel_fbc_hw_tracking_covers_screen(const struct intel_plane_state *
return effective_w <= max_w && effective_h <= max_h;
}
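+/* Check the plane source size against the per-platform FBC plane size limits */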
+static bool intel_fbc_plane_size_valid(const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
+ unsigned int w, h, max_w, max_h;
+
+ if (DISPLAY_VER(i915) >= 10) {
+ max_w = 5120;
+ max_h = 4096;
+ } else if (DISPLAY_VER(i915) >= 8 || IS_HASWELL(i915)) {
+ max_w = 4096;
+ max_h = 4096;
+ } else if (IS_G4X(i915) || DISPLAY_VER(i915) >= 5) {
+ max_w = 4096;
+ max_h = 2048;
+ } else {
+ max_w = 2048;
+ max_h = 1536;
+ }
+
+ w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ h = drm_rect_height(&plane_state->uapi.src) >> 16;
+
+ return w <= max_w && h <= max_h;
+}
+
static bool i8xx_fbc_tiling_valid(const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
@@ -1174,7 +1208,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (!i915->params.enable_fbc) {
+ if (!i915->display.params.enable_fbc) {
plane_state->no_fbc_reason = "disabled per module param or by default";
return 0;
}
@@ -1201,7 +1235,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Recommendation is to keep this combination disabled
* Bspec: 50422 HSD: 14010260002
*/
- if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
+ if (IS_DISPLAY_VER(i915, 12, 14) && crtc_state->has_psr2) {
plane_state->no_fbc_reason = "PSR2 enabled";
return 0;
}
@@ -1241,11 +1275,16 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ if (!intel_fbc_plane_size_valid(plane_state)) {
plane_state->no_fbc_reason = "plane size too big";
return 0;
}
+ if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
+ plane_state->no_fbc_reason = "surface size too big";
+ return 0;
+ }
+
/*
* Work around a problem on GEN9+ HW, where enabling FBC on a plane
* having a Y offset that isn't divisible by 4 causes FIFO underrun
@@ -1751,8 +1790,8 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915)
*/
static int intel_sanitize_fbc_option(struct drm_i915_private *i915)
{
- if (i915->params.enable_fbc >= 0)
- return !!i915->params.enable_fbc;
+ if (i915->display.params.enable_fbc >= 0)
+ return !!i915->display.params.enable_fbc;
if (!HAS_FBC(i915))
return 0;
@@ -1824,9 +1863,9 @@ void intel_fbc_init(struct drm_i915_private *i915)
if (need_fbc_vtd_wa(i915))
DISPLAY_RUNTIME_INFO(i915)->fbc_mask = 0;
- i915->params.enable_fbc = intel_sanitize_fbc_option(i915);
+ i915->display.params.enable_fbc = intel_sanitize_fbc_option(i915);
drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n",
- i915->params.enable_fbc);
+ i915->display.params.enable_fbc);
for_each_fbc_id(i915, fbc_id)
i915->display.fbc[fbc_id] = intel_fbc_create(i915, fbc_id);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 31d0d695d5..99894a855e 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -43,7 +43,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
-#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_mman.h"
#include "i915_drv.h"
@@ -51,6 +50,7 @@
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
+#include "intel_fbdev_fb.h"
#include "intel_frontbuffer.h"
struct intel_fbdev {
@@ -146,65 +146,6 @@ static const struct fb_ops intelfb_ops = {
.fb_mmap = intel_fbdev_mmap,
};
-static int intelfb_alloc(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct intel_fbdev *ifbdev = to_intel_fbdev(helper);
- struct drm_framebuffer *fb;
- struct drm_device *dev = helper->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_mode_fb_cmd2 mode_cmd = {};
- struct drm_i915_gem_object *obj;
- int size;
-
- /* we don't do packed 24bpp */
- if (sizes->surface_bpp == 24)
- sizes->surface_bpp = 32;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
-
- mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
- DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
- mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
- sizes->surface_depth);
-
- size = mode_cmd.pitches[0] * mode_cmd.height;
- size = PAGE_ALIGN(size);
-
- obj = ERR_PTR(-ENODEV);
- if (HAS_LMEM(dev_priv)) {
- obj = i915_gem_object_create_lmem(dev_priv, size,
- I915_BO_ALLOC_CONTIGUOUS |
- I915_BO_ALLOC_USER);
- } else {
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- *
- * Also skip stolen on MTL as Wa_22018444074 mitigation.
- */
- if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
- obj = i915_gem_object_create_stolen(dev_priv, size);
- if (IS_ERR(obj))
- obj = i915_gem_object_create_shmem(dev_priv, size);
- }
-
- if (IS_ERR(obj)) {
- drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
- return PTR_ERR(obj);
- }
-
- fb = intel_framebuffer_create(obj, &mode_cmd);
- i915_gem_object_put(obj);
- if (IS_ERR(fb))
- return PTR_ERR(fb);
-
- ifbdev->fb = to_intel_framebuffer(fb);
- return 0;
-}
-
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -213,7 +154,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct drm_device *dev = helper->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
- struct i915_ggtt *ggtt = to_gt(dev_priv)->ggtt;
const struct i915_gtt_view view = {
.type = I915_GTT_VIEW_NORMAL,
};
@@ -222,9 +162,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
struct i915_vma *vma;
unsigned long flags = 0;
bool prealloc = false;
- void __iomem *vaddr;
struct drm_i915_gem_object *obj;
- struct i915_gem_ww_ctx ww;
int ret;
mutex_lock(&ifbdev->hpd_lock);
@@ -245,12 +183,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
intel_fb = ifbdev->fb = NULL;
}
if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
+ struct drm_framebuffer *fb;
drm_dbg_kms(&dev_priv->drm,
"no BIOS fb, allocating a new one\n");
- ret = intelfb_alloc(helper, sizes);
- if (ret)
- return ret;
- intel_fb = ifbdev->fb;
+ fb = intel_fbdev_fb_alloc(helper, sizes);
+ if (IS_ERR(fb))
+ return PTR_ERR(fb);
+ intel_fb = ifbdev->fb = to_intel_framebuffer(fb);
} else {
drm_dbg_kms(&dev_priv->drm, "re-using BIOS fb\n");
prealloc = true;
@@ -283,49 +222,18 @@ static int intelfb_create(struct drm_fb_helper *helper,
info->fbops = &intelfb_ops;
obj = intel_fb_obj(&intel_fb->base);
- if (i915_gem_object_is_lmem(obj)) {
- struct intel_memory_region *mem = obj->mm.region;
-
- /* Use fbdev's framebuffer from lmem for discrete */
- info->fix.smem_start =
- (unsigned long)(mem->io_start +
- i915_gem_object_get_dma_address(obj, 0));
- info->fix.smem_len = obj->base.size;
- } else {
- /* Our framebuffer is the entirety of fbdev's system memory */
- info->fix.smem_start =
- (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
- info->fix.smem_len = vma->size;
- }
-
- for_i915_gem_ww(&ww, ret, false) {
- ret = i915_gem_object_lock(vma->obj, &ww);
-
- if (ret)
- continue;
-
- vaddr = i915_vma_pin_iomap(vma);
- if (IS_ERR(vaddr)) {
- drm_err(&dev_priv->drm,
- "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
- ret = PTR_ERR(vaddr);
- continue;
- }
- }
+ ret = intel_fbdev_fb_fill_info(dev_priv, info, obj, vma);
if (ret)
goto out_unpin;
- info->screen_base = vaddr;
- info->screen_size = vma->size;
-
drm_fb_helper_fill_info(info, &ifbdev->helper, sizes);
/* If the object is shmemfs backed, it will have given us zeroed pages.
* If the object is stolen however, it will be full of whatever
* garbage was left in there.
*/
- if (!i915_gem_object_is_shmem(vma->obj) && !prealloc)
+ if (!i915_gem_object_is_shmem(obj) && !prealloc)
memset_io(info->screen_base, 0, info->screen_size);
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
@@ -424,12 +332,12 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
continue;
}
- if (obj->base.size > max_size) {
+ if (intel_bo_to_drm_bo(obj)->size > max_size) {
drm_dbg_kms(&i915->drm,
"found possible fb from [PLANE:%d:%s]\n",
plane->base.base.id, plane->base.name);
fb = to_intel_framebuffer(plane_state->uapi.fb);
- max_size = obj->base.size;
+ max_size = intel_bo_to_drm_bo(obj)->size;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
new file mode 100644
index 0000000000..717c3a3237
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_fb_helper.h>
+
+#include "gem/i915_gem_lmem.h"
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_fbdev_fb.h"
+
+struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct drm_framebuffer *fb;
+ struct drm_device *dev = helper->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_fb_cmd2 mode_cmd = {};
+ struct drm_i915_gem_object *obj;
+ int size;
+
+ /* we don't do packed 24bpp */
+ if (sizes->surface_bpp == 24)
+ sizes->surface_bpp = 32;
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width *
+ DIV_ROUND_UP(sizes->surface_bpp, 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = PAGE_ALIGN(size);
+
+ obj = ERR_PTR(-ENODEV);
+ if (HAS_LMEM(dev_priv)) {
+ obj = i915_gem_object_create_lmem(dev_priv, size,
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_USER);
+ } else {
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ *
+ * Also skip stolen on MTL as Wa_22018444074 mitigation.
+ */
+ if (!(IS_METEORLAKE(dev_priv)) && size * 2 < dev_priv->dsm.usable_size)
+ obj = i915_gem_object_create_stolen(dev_priv, size);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_shmem(dev_priv, size);
+ }
+
+ if (IS_ERR(obj)) {
+ drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ fb = intel_framebuffer_create(obj, &mode_cmd);
+ i915_gem_object_put(obj);
+
+ return fb;
+}
+
+int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+ struct drm_i915_gem_object *obj, struct i915_vma *vma)
+{
+ struct i915_gem_ww_ctx ww;
+ void __iomem *vaddr;
+ int ret;
+
+ if (i915_gem_object_is_lmem(obj)) {
+ struct intel_memory_region *mem = obj->mm.region;
+
+ /* Use fbdev's framebuffer from lmem for discrete */
+ info->fix.smem_start =
+ (unsigned long)(mem->io_start +
+ i915_gem_object_get_dma_address(obj, 0));
+ info->fix.smem_len = obj->base.size;
+ } else {
+ struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
+
+ /* Our framebuffer is the entirety of fbdev's system memory */
+ info->fix.smem_start =
+ (unsigned long)(ggtt->gmadr.start + i915_ggtt_offset(vma));
+ info->fix.smem_len = vma->size;
+ }
+
+ for_i915_gem_ww(&ww, ret, false) {
+ ret = i915_gem_object_lock(vma->obj, &ww);
+
+ if (ret)
+ continue;
+
+ vaddr = i915_vma_pin_iomap(vma);
+ if (IS_ERR(vaddr)) {
+ drm_err(&i915->drm,
+ "Failed to remap framebuffer into virtual memory (%pe)\n", vaddr);
+ ret = PTR_ERR(vaddr);
+ continue;
+ }
+ }
+
+ if (ret)
+ return ret;
+
+ info->screen_base = vaddr;
+ info->screen_size = intel_bo_to_drm_bo(obj)->size;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
new file mode 100644
index 0000000000..a395b2c65d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef __INTEL_FBDEV_FB_H__
+#define __INTEL_FBDEV_FB_H__
+
+struct drm_fb_helper;
+struct drm_fb_helper_surface_size;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct fb_info;
+struct i915_vma;
+
+struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes);
+int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
+ struct drm_i915_gem_object *obj, struct i915_vma *vma);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index e6429dfebe..295a0f24eb 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -10,6 +10,7 @@
#include "intel_crtc.h"
#include "intel_ddi.h"
#include "intel_de.h"
+#include "intel_dp.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
@@ -338,8 +339,11 @@ int ilk_fdi_compute_config(struct intel_crtc *crtc,
pipe_config->fdi_lanes = lane;
- intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
- link_bw, &pipe_config->fdi_m_n, false);
+ intel_link_compute_m_n(to_bpp_x16(pipe_config->pipe_bpp),
+ lane, fdi_dotclock,
+ link_bw,
+ intel_dp_bw_fec_overhead(false),
+ &pipe_config->fdi_m_n);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 40d7b6f3f4..e9e4dcf345 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -899,7 +899,6 @@ int intel_gmbus_setup(struct drm_i915_private *i915)
}
bus->adapter.owner = THIS_MODULE;
- bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
sizeof(bus->adapter.name),
"i915 gmbus %s", gmbus_pin->name);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index c89da3568e..39b3f7c0c7 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -923,7 +923,7 @@ static int _intel_hdcp_disable(struct intel_connector *connector)
return 0;
}
-static int _intel_hdcp_enable(struct intel_connector *connector)
+static int intel_hdcp1_enable(struct intel_connector *connector)
{
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -1058,7 +1058,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- ret = _intel_hdcp_enable(connector);
+ ret = intel_hdcp1_enable(connector);
if (ret) {
drm_err(&i915->drm, "Failed to enable hdcp (%d)\n", ret);
intel_hdcp_update_value(connector,
@@ -2324,10 +2324,10 @@ intel_hdcp_set_streams(struct intel_digital_port *dig_port,
return 0;
}
-int intel_hdcp_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state)
+static int _intel_hdcp_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_connector *connector =
@@ -2388,7 +2388,7 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
*/
if (ret && intel_hdcp_capable(connector) &&
hdcp->content_type != DRM_MODE_HDCP_CONTENT_TYPE1) {
- ret = _intel_hdcp_enable(connector);
+ ret = intel_hdcp1_enable(connector);
}
if (!ret) {
@@ -2404,6 +2404,27 @@ int intel_hdcp_enable(struct intel_atomic_state *state,
return ret;
}
+void intel_hdcp_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+
+ /*
+ * Enable HDCP if it's desired, or if userspace has it enabled
+ * while the driver set its state to undesired.
+ */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+ (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED))
+ _intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+}
+
int intel_hdcp_disable(struct intel_connector *connector)
{
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
@@ -2491,7 +2512,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state,
}
if (desired_and_not_enabled || content_protection_type_changed)
- intel_hdcp_enable(state, encoder, crtc_state, conn_state);
+ _intel_hdcp_enable(state, encoder, crtc_state, conn_state);
}
void intel_hdcp_component_fini(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index 5997c52a09..a9c784fd9b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -28,10 +28,10 @@ void intel_hdcp_atomic_check(struct drm_connector *connector,
int intel_hdcp_init(struct intel_connector *connector,
struct intel_digital_port *dig_port,
const struct intel_hdcp_shim *hdcp_shim);
-int intel_hdcp_enable(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *pipe_config,
- const struct drm_connector_state *conn_state);
+void intel_hdcp_enable(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *pipe_config,
+ const struct drm_connector_state *conn_state);
int intel_hdcp_disable(struct intel_connector *connector);
void intel_hdcp_update_pipe(struct intel_atomic_state *state,
struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index bfa456fa7d..39e4f5f7c8 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -3034,16 +3034,6 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
"HDCP init failed, skipping.\n");
}
- /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
- * 0xd. Failure to do so will result in spurious interrupts being
- * generated on the port when a cable is not attached.
- */
- if (IS_G45(dev_priv)) {
- u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
- intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
- (temp & ~0xf) | 0xd);
- }
-
cec_fill_conn_info_from_drm(&conn_info, connector);
intel_hdmi->cec_notifier =
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index f07047e9cb..04f62f27ad 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -1361,11 +1361,24 @@ static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
bxt_hpd_detection_setup(dev_priv);
}
+static void g45_hpd_peg_band_gap_wa(struct drm_i915_private *i915)
+{
+ /*
+ * For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
+ * 0xd. Failure to do so will result in spurious interrupts being
+ * generated on the port when a cable is not attached.
+ */
+ intel_de_rmw(i915, PEG_BAND_GAP_DATA, 0xf, 0xd);
+}
+
static void i915_hpd_enable_detection(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u32 hotplug_en = hpd_mask_i915[encoder->hpd_pin];
+ if (IS_G45(i915))
+ g45_hpd_peg_band_gap_wa(i915);
+
/* HPD sense and interrupt enable are one and the same */
i915_hotplug_interrupt_update(i915, hotplug_en, hotplug_en);
}
@@ -1389,6 +1402,9 @@ static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
+ if (IS_G45(dev_priv))
+ g45_hpd_peg_band_gap_wa(dev_priv);
+
/* Ignore TV since it's buggy */
i915_hotplug_interrupt_update_locked(dev_priv,
HOTPLUG_INT_EN_MASK |
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c
index c5eb5f2425..9c6d35a405 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.c
@@ -7,6 +7,7 @@
#include "intel_atomic.h"
#include "intel_display_types.h"
+#include "intel_dp_mst.h"
#include "intel_fdi.h"
#include "intel_link_bw.h"
@@ -21,6 +22,7 @@ void intel_link_bw_init_limits(struct drm_i915_private *i915, struct intel_link_
{
enum pipe pipe;
+ limits->force_fec_pipes = 0;
limits->bpp_limit_reached_pipes = 0;
for_each_pipe(i915, pipe)
limits->max_bpp_x16[pipe] = INT_MAX;
@@ -53,11 +55,11 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
struct drm_i915_private *i915 = to_i915(state->base.dev);
enum pipe max_bpp_pipe = INVALID_PIPE;
struct intel_crtc *crtc;
- int max_bpp = 0;
+ int max_bpp_x16 = 0;
for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
struct intel_crtc_state *crtc_state;
- int link_bpp;
+ int link_bpp_x16;
if (limits->bpp_limit_reached_pipes & BIT(crtc->pipe))
continue;
@@ -68,7 +70,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
return PTR_ERR(crtc_state);
if (crtc_state->dsc.compression_enable)
- link_bpp = crtc_state->dsc.compressed_bpp;
+ link_bpp_x16 = crtc_state->dsc.compressed_bpp_x16;
else
/*
* TODO: for YUV420 the actual link bpp is only half
@@ -76,10 +78,10 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
* is based on the pipe bpp value, set the actual link bpp
* limit here once the MST BW allocation is fixed.
*/
- link_bpp = crtc_state->pipe_bpp;
+ link_bpp_x16 = to_bpp_x16(crtc_state->pipe_bpp);
- if (link_bpp > max_bpp) {
- max_bpp = link_bpp;
+ if (link_bpp_x16 > max_bpp_x16) {
+ max_bpp_x16 = link_bpp_x16;
max_bpp_pipe = crtc->pipe;
}
}
@@ -87,7 +89,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state,
if (max_bpp_pipe == INVALID_PIPE)
return -ENOSPC;
- limits->max_bpp_x16[max_bpp_pipe] = to_bpp_x16(max_bpp) - 1;
+ limits->max_bpp_x16[max_bpp_pipe] = max_bpp_x16 - 1;
return intel_modeset_pipes_in_mask_early(state, reason,
BIT(max_bpp_pipe));
@@ -143,6 +145,10 @@ static int check_all_link_config(struct intel_atomic_state *state,
/* TODO: Check additional shared display link configurations like MST */
int ret;
+ ret = intel_dp_mst_atomic_check_link(state, limits);
+ if (ret)
+ return ret;
+
ret = intel_fdi_atomic_check_link(state, limits);
if (ret)
return ret;
@@ -158,6 +164,12 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
bool bpps_changed = false;
enum pipe pipe;
+ /* FEC can't be forced off after it was forced on. */
+ if (drm_WARN_ON(&i915->drm,
+ (old_limits->force_fec_pipes & new_limits->force_fec_pipes) !=
+ old_limits->force_fec_pipes))
+ return false;
+
for_each_pipe(i915, pipe) {
/* The bpp limit can only decrease. */
if (drm_WARN_ON(&i915->drm,
@@ -172,7 +184,9 @@ assert_link_limit_change_valid(struct drm_i915_private *i915,
/* At least one limit must change. */
if (drm_WARN_ON(&i915->drm,
- !bpps_changed))
+ !bpps_changed &&
+ new_limits->force_fec_pipes ==
+ old_limits->force_fec_pipes))
return false;
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h
index e07df22a77..2cf57307cc 100644
--- a/drivers/gpu/drm/i915/display/intel_link_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_link_bw.h
@@ -16,6 +16,7 @@ struct intel_atomic_state;
struct intel_crtc_state;
struct intel_link_bw_limits {
+ u8 force_fec_pipes;
u8 bpp_limit_reached_pipes;
/* in 1/16 bpp units */
int max_bpp_x16[I915_MAX_PIPES];
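The limits above are tracked in 1/16 bpp units (the _x16 suffix). A minimal user-space sketch of that fixed-point convention, assuming the driver's to_bpp_x16()/to_bpp_int() helpers are plain shift-by-4 conversions (the helper names below are local stand-ins, not the driver's):

#include <stdio.h>

/* local stand-ins, assumed equivalent to to_bpp_x16()/to_bpp_int() */
static int bpp_to_x16(int bpp)     { return bpp << 4; }
static int x16_to_bpp_int(int x16) { return x16 >> 4; }

int main(void)
{
        int pipe_bpp = 30;                    /* e.g. 10 bpc RGB */
        int limit = bpp_to_x16(pipe_bpp) - 1; /* "just below 30 bpp", as in intel_link_bw_reduce_bpp() */

        printf("30 bpp = %d in x16 units\n", bpp_to_x16(pipe_bpp));
        printf("limit %d reads back as %d + %d/16 bpp\n",
               limit, x16_to_bpp_int(limit), limit & 0xf);
        return 0;
}

A limit of 479 reads back as 29 + 15/16 bpp, strictly below the pipe's 30 bpp, which is what the "- 1" in the intel_link_bw_reduce_bpp() hunk relies on.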
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index bcbdd1984f..221f5c6c87 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -185,7 +185,7 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
/* Convert from 100ms to 100us units */
pps->t4 = val * 1000;
- if (DISPLAY_VER(dev_priv) <= 4 &&
+ if (DISPLAY_VER(dev_priv) < 5 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
drm_dbg_kms(&dev_priv->drm,
"Panel power timings uninitialized, "
@@ -799,8 +799,8 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
unsigned int val;
/* use the module option value if specified */
- if (i915->params.lvds_channel_mode > 0)
- return i915->params.lvds_channel_mode == 2;
+ if (i915->display.params.lvds_channel_mode > 0)
+ return i915->display.params.lvds_channel_mode == 2;
/* single channel LVDS is limited to 112 MHz */
if (fixed_mode->clock > 112999)
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index b8f43efb0a..caeca3a844 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -769,8 +769,9 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
drm_connector_list_iter_begin(&i915->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
+ struct intel_crtc_state *crtc_state = NULL;
+
if (connector->get_hw_state(connector)) {
- struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
connector->base.dpms = DRM_MODE_DPMS_ON;
@@ -796,6 +797,10 @@ static void intel_modeset_readout_hw_state(struct drm_i915_private *i915)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
+
+ if (connector->sync_state)
+ connector->sync_state(connector, crtc_state);
+
drm_dbg_kms(&i915->drm,
"[CONNECTOR:%d:%s] hw state readout: %s\n",
connector->base.base.id, connector->base.name,
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_verify.c b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
index 5e1c2c7804..076298a8d4 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_verify.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_verify.c
@@ -244,7 +244,7 @@ void intel_modeset_verify_crtc(struct intel_atomic_state *state,
verify_crtc_state(state, crtc);
intel_shared_dpll_state_verify(state, crtc);
intel_mpllb_state_verify(state, crtc);
- intel_c10pll_state_verify(state, crtc);
+ intel_cx0pll_state_verify(state, crtc);
}
void intel_modeset_verify_disabled(struct intel_atomic_state *state)
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 84078fb82b..1ce785db6a 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -841,7 +841,7 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->display.opregion;
const struct firmware *fw = NULL;
- const char *name = dev_priv->params.vbt_firmware;
+ const char *name = dev_priv->display.params.vbt_firmware;
int ret;
if (!name || !*name)
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 483beedac5..0d8e5320a4 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -46,8 +46,8 @@
bool intel_panel_use_ssc(struct drm_i915_private *i915)
{
- if (i915->params.panel_use_ssc >= 0)
- return i915->params.panel_use_ssc != 0;
+ if (i915->display.params.panel_use_ssc >= 0)
+ return i915->display.params.panel_use_ssc != 0;
return i915->display.vbt.lvds_use_ssc &&
!intel_has_quirk(i915, QUIRK_LVDS_SSC_DISABLE);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 866786e6b3..baf679759e 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -8,6 +8,7 @@
#include "intel_crt.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dpll.h"
#include "intel_fdi.h"
#include "intel_fdi_regs.h"
#include "intel_lvds.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 73f0f1714b..a8fa3a2099 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -90,7 +90,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.pps_pipe;
bool pll_enabled, release_cl_override = false;
- enum dpio_phy phy = DPIO_PHY(pipe);
+ enum dpio_phy phy = vlv_pipe_to_phy(pipe);
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
u32 DP;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 614b4534ef..925776ba13 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -29,6 +29,7 @@
#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
+#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dp.h"
@@ -172,6 +173,15 @@
* irrelevant for normal operation.
*/
+bool intel_encoder_can_psr(struct intel_encoder *encoder)
+{
+ if (intel_encoder_is_dp(encoder) || encoder->type == INTEL_OUTPUT_DP_MST)
+ return CAN_PSR(enc_to_intel_dp(encoder)) ||
+ CAN_PANEL_REPLAY(enc_to_intel_dp(encoder));
+ else
+ return false;
+}
+
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
struct intel_connector *connector = intel_dp->attached_connector;
@@ -179,9 +189,9 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- if (i915->params.enable_psr == -1)
+ if (i915->display.params.enable_psr == -1)
return connector->panel.vbt.psr.enable;
- return i915->params.enable_psr;
+ return i915->display.params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -198,7 +208,7 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
- if (i915->params.enable_psr == 1)
+ if (i915->display.params.enable_psr == 1)
return false;
return true;
}
@@ -474,27 +484,41 @@ exit:
intel_dp->psr.su_y_granularity = y;
}
-void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv =
- to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ u8 pr_dpcd = 0;
- drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
- sizeof(intel_dp->psr_dpcd));
+ intel_dp->psr.sink_panel_replay_support = false;
+ drm_dp_dpcd_readb(&intel_dp->aux, DP_PANEL_REPLAY_CAP, &pr_dpcd);
- if (!intel_dp->psr_dpcd[0])
+ if (!(pr_dpcd & DP_PANEL_REPLAY_SUPPORT)) {
+ drm_dbg_kms(&i915->drm,
+ "Panel replay is not supported by panel\n");
return;
- drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "Panel replay is supported by panel\n");
+ intel_dp->psr.sink_panel_replay_support = true;
+}
+
+static void _psr_init_dpcd(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *i915 =
+ to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"PSR support not currently available for this panel\n");
return;
}
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(&i915->drm,
"Panel lacks power state control, PSR cannot be enabled\n");
return;
}
@@ -503,8 +527,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
intel_dp->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 9 &&
- (intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED)) {
+ if (DISPLAY_VER(i915) >= 9 &&
+ intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
bool alpm = intel_dp_get_alpm_status(intel_dp);
@@ -521,14 +545,25 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* GTC first.
*/
intel_dp->psr.sink_psr2_support = y_req && alpm;
- drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
+ drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
intel_dp->psr.sink_psr2_support ? "" : "not ");
+ }
+}
- if (intel_dp->psr.sink_psr2_support) {
- intel_dp->psr.colorimetry_support =
- intel_dp_get_colorimetry_status(intel_dp);
- intel_dp_get_su_granularity(intel_dp);
- }
+void intel_psr_init_dpcd(struct intel_dp *intel_dp)
+{
+ _panel_replay_init_dpcd(intel_dp);
+
+ drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd,
+ sizeof(intel_dp->psr_dpcd));
+
+ if (intel_dp->psr_dpcd[0])
+ _psr_init_dpcd(intel_dp);
+
+ if (intel_dp->psr.sink_psr2_support) {
+ intel_dp->psr.colorimetry_support =
+ intel_dp_get_colorimetry_status(intel_dp);
+ intel_dp_get_su_granularity(intel_dp);
}
}
@@ -574,8 +609,11 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u8 dpcd_val = DP_PSR_ENABLE;
- /* Enable ALPM at sink for psr2 */
+ if (intel_dp->psr.panel_replay_enabled)
+ return;
+
if (intel_dp->psr.psr2_enabled) {
+ /* Enable ALPM at sink for psr2 */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG,
DP_ALPM_ENABLE |
DP_ALPM_LOCK_ERROR_IRQ_HPD_ENABLE);
@@ -592,6 +630,9 @@ static void intel_psr_enable_sink(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
dpcd_val |= DP_PSR_SU_REGION_SCANLINE_CAPTURE;
+ if (intel_dp->psr.entry_setup_frames > 0)
+ dpcd_val |= DP_PSR_FRAME_CAPTURE;
+
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, dpcd_val);
drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
@@ -606,7 +647,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 11)
val |= EDP_PSR_TP4_TIME_0us;
- if (dev_priv->params.psr_safest_params) {
+ if (dev_priv->display.params.psr_safest_params) {
val |= EDP_PSR_TP1_TIME_2500us;
val |= EDP_PSR_TP2_TP3_TIME_2500us;
goto check_tp3_sel;
@@ -690,6 +731,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
+ if (DISPLAY_VER(dev_priv) >= 20)
+ val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
+
intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}
@@ -700,7 +744,7 @@ static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
- if (dev_priv->params.psr_safest_params)
+ if (dev_priv->display.params.psr_safest_params)
return EDP_PSR2_TP2_TIME_2500us;
if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
@@ -727,21 +771,49 @@ static int psr2_block_count(struct intel_dp *intel_dp)
return psr2_block_count_lines(intel_dp) / 4;
}
+static u8 frames_before_su_entry(struct intel_dp *intel_dp)
+{
+ u8 frames_before_su_entry;
+
+ frames_before_su_entry = max_t(u8,
+ intel_dp->psr.sink_sync_latency + 1,
+ 2);
+
+ /* Entry setup frames must be at least 1 less than frames before SU entry */
+ if (intel_dp->psr.entry_setup_frames >= frames_before_su_entry)
+ frames_before_su_entry = intel_dp->psr.entry_setup_frames + 1;
+
+ return frames_before_su_entry;
+}
+
+static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ intel_de_rmw(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
+ 0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
+
+ intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
+ TRANS_DP2_PANEL_REPLAY_ENABLE);
+}
+
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
+ u32 psr_val = 0;
val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- if (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
val |= EDP_SU_TRACK_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
+ if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
val |= EDP_Y_COORDINATE_ENABLE;
- val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
+ val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
+
val |= intel_psr2_get_tp_time(intel_dp);
if (DISPLAY_VER(dev_priv) >= 12) {
@@ -785,6 +857,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
val |= EDP_PSR2_SU_SDP_SCANLINE;
+ if (DISPLAY_VER(dev_priv) >= 20)
+ psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
+
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
@@ -798,7 +873,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is
* recommending keep this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), 0);
+ intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
intel_de_write(dev_priv, EDP_PSR2_CTL(cpu_transcoder), val);
}
@@ -816,13 +891,13 @@ transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_trans
return false;
}
-static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate)
+static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
{
- if (!cstate || !cstate->hw.active)
+ if (!crtc_state->hw.active)
return 0;
return DIV_ROUND_UP(1000 * 1000,
- drm_mode_vrefresh(&cstate->hw.adjusted_mode));
+ drm_mode_vrefresh(&crtc_state->hw.adjusted_mode));
}
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
@@ -943,7 +1018,7 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!dev_priv->params.enable_psr2_sel_fetch &&
+ if (!dev_priv->display.params.enable_psr2_sel_fetch &&
intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 sel fetch not enabled, disabled by parameter\n");
@@ -1019,7 +1094,7 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
/* Not supported <13 / Wa_22012279113:adl-p */
- if (DISPLAY_VER(dev_priv) <= 13 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
return false;
crtc_state->req_psr2_sdp_prior_scanline = true;
@@ -1056,7 +1131,7 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
fast_wake_lines > max_wake_lines)
return false;
- if (i915->params.psr_safest_params)
+ if (i915->display.params.psr_safest_params)
io_wake_lines = fast_wake_lines = max_wake_lines;
/* According to Bspec lower limit should be set as 7 lines. */
@@ -1066,6 +1141,39 @@ static bool _compute_psr2_wake_times(struct intel_dp *intel_dp,
return true;
}
+static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
+ int entry_setup_frames = 0;
+
+ if (psr_setup_time < 0) {
+ drm_dbg_kms(&i915->drm,
+ "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+ intel_dp->psr_dpcd[1]);
+ return -ETIME;
+ }
+
+ if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
+ adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
+ if (DISPLAY_VER(i915) >= 20) {
+ /* setup entry frames can be up to 3 frames */
+ entry_setup_frames = 1;
+ drm_dbg_kms(&i915->drm,
+ "PSR setup entry frames %d\n",
+ entry_setup_frames);
+ } else {
+ drm_dbg_kms(&i915->drm,
+ "PSR condition failed: PSR setup time (%d us) too long\n",
+ psr_setup_time);
+ return -ETIME;
+ }
+ }
+
+ return entry_setup_frames;
+}
+
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -1113,7 +1221,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(dev_priv) <= 13 && !IS_ALDERLAKE_P(dev_priv))) {
+ (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
drm_dbg_kms(&dev_priv->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
@@ -1206,24 +1314,42 @@ unsupported:
return false;
}
-void intel_psr_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static bool _psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- int psr_setup_time;
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int entry_setup_frames;
/*
* Current PSR panels don't work reliably with VRR enabled
* So if VRR is enabled, do not enable PSR.
*/
if (crtc_state->vrr.enable)
- return;
+ return false;
if (!CAN_PSR(intel_dp))
- return;
+ return false;
+
+ entry_setup_frames = intel_psr_entry_setup_frames(intel_dp, adjusted_mode);
+
+ if (entry_setup_frames >= 0) {
+ intel_dp->psr.entry_setup_frames = entry_setup_frames;
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: PSR setup timing not met\n");
+ return false;
+ }
+
+ return true;
+}
+
+void intel_psr_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
if (!psr_global_enabled(intel_dp)) {
drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
@@ -1242,23 +1368,25 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
return;
}
- psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
- if (psr_setup_time < 0) {
+ /*
+ * FIXME figure out what is wrong with PSR+bigjoiner and
+ * fix it. Presumably something related to the fact that
+ * PSR is a transcoder level feature.
+ */
+ if (crtc_state->bigjoiner_pipes) {
drm_dbg_kms(&dev_priv->drm,
- "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
- intel_dp->psr_dpcd[1]);
+ "PSR disabled due to bigjoiner\n");
return;
}
- if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
- adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR condition failed: PSR setup time (%d us) too long\n",
- psr_setup_time);
+ if (CAN_PANEL_REPLAY(intel_dp))
+ crtc_state->has_panel_replay = true;
+ else
+ crtc_state->has_psr = _psr_compute_config(intel_dp, crtc_state);
+
+ if (!(crtc_state->has_panel_replay || crtc_state->has_psr))
return;
- }
- crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
@@ -1279,18 +1407,23 @@ void intel_psr_get_config(struct intel_encoder *encoder,
return;
intel_dp = &dig_port->dp;
- if (!CAN_PSR(intel_dp))
+ if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp)))
return;
mutex_lock(&intel_dp->psr.lock);
if (!intel_dp->psr.enabled)
goto unlock;
- /*
- * Not possible to read EDP_PSR/PSR2_CTL registers as it is
- * enabled/disabled because of frontbuffer tracking and others.
- */
- pipe_config->has_psr = true;
+ if (intel_dp->psr.panel_replay_enabled) {
+ pipe_config->has_panel_replay = true;
+ } else {
+ /*
+ * Not possible to read EDP_PSR/PSR2_CTL registers as it is
+ * enabled/disabled because of frontbuffer tracking and others.
+ */
+ pipe_config->has_psr = true;
+ }
+
pipe_config->has_psr2 = intel_dp->psr.psr2_enabled;
pipe_config->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
@@ -1327,8 +1460,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
lockdep_assert_held(&intel_dp->psr.lock);
- /* psr1 and psr2 are mutually exclusive.*/
- if (intel_dp->psr.psr2_enabled)
+ /* psr1, psr2 and panel-replay are mutually exclusive. */
+ if (intel_dp->psr.panel_replay_enabled)
+ dg2_activate_panel_replay(intel_dp);
+ else if (intel_dp->psr.psr2_enabled)
hsw_activate_psr2(intel_dp);
else
hsw_activate_psr1(intel_dp);
@@ -1462,12 +1597,10 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* All supported adlp panels have 1-based X granularity, this may
* cause issues if non-supported panels are used.
*/
- if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv, MTL_CHICKEN_TRANS(cpu_transcoder), 0,
- ADLP_1_BASED_X_GRANULARITY);
- else if (IS_ALDERLAKE_P(dev_priv))
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
- ADLP_1_BASED_X_GRANULARITY);
+ if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ IS_ALDERLAKE_P(dev_priv))
+ intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ 0, ADLP_1_BASED_X_GRANULARITY);
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (IS_DISPLAY_IP_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
@@ -1518,6 +1651,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
intel_dp->psr.psr2_enabled = crtc_state->has_psr2;
+ intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
intel_dp->psr.busy_frontbuffer_bits = 0;
intel_dp->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
intel_dp->psr.transcoder = crtc_state->cpu_transcoder;
@@ -1533,8 +1667,12 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
if (!psr_interrupt_error_check(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
- intel_dp->psr.psr2_enabled ? "2" : "1");
+ if (intel_dp->psr.panel_replay_enabled)
+ drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
+ else
+ drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+ intel_dp->psr.psr2_enabled ? "2" : "1");
+
intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
intel_psr_enable_sink(intel_dp);
@@ -1563,7 +1701,10 @@ static void intel_psr_exit(struct intel_dp *intel_dp)
return;
}
- if (intel_dp->psr.psr2_enabled) {
+ if (intel_dp->psr.panel_replay_enabled) {
+ intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
+ TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
+ } else if (intel_dp->psr.psr2_enabled) {
tgl_disallow_dc3co_on_psr2_exit(intel_dp);
val = intel_de_rmw(dev_priv, EDP_PSR2_CTL(cpu_transcoder),
@@ -1612,8 +1753,11 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (!intel_dp->psr.enabled)
return;
- drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
- intel_dp->psr.psr2_enabled ? "2" : "1");
+ if (intel_dp->psr.panel_replay_enabled)
+ drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
+ else
+ drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ intel_dp->psr.psr2_enabled ? "2" : "1");
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
@@ -1646,6 +1790,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);
intel_dp->psr.enabled = false;
+ intel_dp->psr.panel_replay_enabled = false;
intel_dp->psr.psr2_enabled = false;
intel_dp->psr.psr2_sel_fetch_enabled = false;
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
@@ -1793,81 +1938,6 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
}
-void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
-
- if (!crtc_state->enable_psr2_sel_fetch)
- return;
-
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
-}
-
-void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
-{
- struct drm_i915_private *i915 = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
-
- if (!crtc_state->enable_psr2_sel_fetch)
- return;
-
- if (plane->id == PLANE_CURSOR)
- intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- plane_state->ctl);
- else
- intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
- PLANE_SEL_FETCH_CTL_ENABLE);
-}
-
-void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- enum pipe pipe = plane->pipe;
- const struct drm_rect *clip;
- u32 val;
- int x, y;
-
- if (!crtc_state->enable_psr2_sel_fetch)
- return;
-
- if (plane->id == PLANE_CURSOR)
- return;
-
- clip = &plane_state->psr2_sel_fetch_area;
-
- val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
- val |= plane_state->uapi.dst.x1;
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
-
- x = plane_state->view.color_plane[color_plane].x;
-
- /*
- * From Bspec: UV surface Start Y Position = half of Y plane Y
- * start position.
- */
- if (!color_plane)
- y = plane_state->view.color_plane[color_plane].y + clip->y1;
- else
- y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
-
- val = y << 16 | x;
-
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
- val);
-
- /* Sizes are 0 based */
- val = (drm_rect_height(clip) - 1) << 16;
- val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
-}
-
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
@@ -2127,8 +2197,19 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
continue;
inter = pipe_clip;
- if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
+ sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
+ if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) {
+ sel_fetch_area->y1 = -1;
+ sel_fetch_area->y2 = -1;
+ /*
+ * If plane sel fetch was previously enabled,
+ * disable it.
+ */
+ if (drm_rect_height(&old_plane_state->psr2_sel_fetch_area) > 0)
+ crtc_state->update_planes |= BIT(plane->id);
+
continue;
+ }
if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
full_update = true;
@@ -2217,7 +2298,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
- if (!crtc_state->has_psr)
+ if (!(crtc_state->has_psr || crtc_state->has_panel_replay))
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
@@ -2703,7 +2784,7 @@ void intel_psr_init(struct intel_dp *intel_dp)
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!HAS_PSR(dev_priv))
+ if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
return;
/*
@@ -2721,7 +2802,10 @@ void intel_psr_init(struct intel_dp *intel_dp)
return;
}
- intel_dp->psr.source_support = true;
+ if (HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp))
+ intel_dp->psr.source_panel_replay_support = true;
+ else
+ intel_dp->psr.source_support = true;
/* Set link_standby x link_off defaults */
if (DISPLAY_VER(dev_priv) < 12)
@@ -2738,12 +2822,19 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
{
struct drm_dp_aux *aux = &intel_dp->aux;
int ret;
+ unsigned int offset;
+
+ offset = intel_dp->psr.panel_replay_enabled ?
+ DP_SINK_DEVICE_PR_AND_FRAME_LOCK_STATUS : DP_PSR_STATUS;
- ret = drm_dp_dpcd_readb(aux, DP_PSR_STATUS, status);
+ ret = drm_dp_dpcd_readb(aux, offset, status);
if (ret != 1)
return ret;
- ret = drm_dp_dpcd_readb(aux, DP_PSR_ERROR_STATUS, error_status);
+ offset = intel_dp->psr.panel_replay_enabled ?
+ DP_PANEL_REPLAY_ERROR_STATUS : DP_PSR_ERROR_STATUS;
+
+ ret = drm_dp_dpcd_readb(aux, offset, error_status);
if (ret != 1)
return ret;
@@ -2964,7 +3055,7 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
status = live_status[status_val];
}
- seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+ seq_printf(m, "Source PSR/PanelReplay status: %s [0x%08x]\n", status, val);
}
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
@@ -2977,18 +3068,22 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
bool enabled;
u32 val;
- seq_printf(m, "Sink support: %s", str_yes_no(psr->sink_support));
+ seq_printf(m, "Sink support: PSR = %s",
+ str_yes_no(psr->sink_support));
+
if (psr->sink_support)
seq_printf(m, " [0x%02x]", intel_dp->psr_dpcd[0]);
- seq_puts(m, "\n");
+ seq_printf(m, ", Panel Replay = %s\n", str_yes_no(psr->sink_panel_replay_support));
- if (!psr->sink_support)
+ if (!(psr->sink_support || psr->sink_panel_replay_support))
return 0;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
mutex_lock(&psr->lock);
- if (psr->enabled)
+ if (psr->panel_replay_enabled)
+ status = "Panel Replay Enabled";
+ else if (psr->enabled)
status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
else
status = "disabled";
@@ -3001,14 +3096,17 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
goto unlock;
}
- if (psr->psr2_enabled) {
+ if (psr->panel_replay_enabled) {
+ val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
+ enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
+ } else if (psr->psr2_enabled) {
val = intel_de_read(dev_priv, EDP_PSR2_CTL(cpu_transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
enabled = val & EDP_PSR_ENABLE;
}
- seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
str_enabled_disabled(enabled), val);
psr_source_status(intel_dp, m);
seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
@@ -3146,6 +3244,16 @@ void intel_psr_debugfs_register(struct drm_i915_private *i915)
i915, &i915_edp_psr_status_fops);
}
+static const char *psr_mode_str(struct intel_dp *intel_dp)
+{
+ if (intel_dp->psr.panel_replay_enabled)
+ return "PANEL-REPLAY";
+ else if (intel_dp->psr.enabled)
+ return "PSR";
+
+ return "unknown";
+}
+
static int i915_psr_sink_status_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = m->private;
@@ -3160,12 +3268,19 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
"reserved",
"sink internal error",
};
+ static const char * const panel_replay_status[] = {
+ "Sink device frame is locked to the Source device",
+ "Sink device is coasting, using the VTotal target",
+ "Sink device is governing the frame rate (frame rate unlock is granted)",
+ "Sink device in the process of re-locking with the Source device",
+ };
const char *str;
int ret;
u8 status, error_status;
+ u32 idx;
- if (!CAN_PSR(intel_dp)) {
- seq_puts(m, "PSR Unsupported\n");
+ if (!(CAN_PSR(intel_dp) || CAN_PANEL_REPLAY(intel_dp))) {
+ seq_puts(m, "PSR/Panel-Replay Unsupported\n");
return -ENODEV;
}
@@ -3176,15 +3291,20 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
if (ret)
return ret;
- status &= DP_PSR_SINK_STATE_MASK;
- if (status < ARRAY_SIZE(sink_status))
- str = sink_status[status];
- else
- str = "unknown";
+ str = "unknown";
+ if (intel_dp->psr.panel_replay_enabled) {
+ idx = (status & DP_SINK_FRAME_LOCKED_MASK) >> DP_SINK_FRAME_LOCKED_SHIFT;
+ if (idx < ARRAY_SIZE(panel_replay_status))
+ str = panel_replay_status[idx];
+ } else if (intel_dp->psr.enabled) {
+ idx = status & DP_PSR_SINK_STATE_MASK;
+ if (idx < ARRAY_SIZE(sink_status))
+ str = sink_status[idx];
+ }
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", status, str);
+ seq_printf(m, "Sink %s status: 0x%x [%s]\n", psr_mode_str(intel_dp), status, str);
- seq_printf(m, "Sink PSR error status: 0x%x", error_status);
+ seq_printf(m, "Sink %s error status: 0x%x", psr_mode_str(intel_dp), error_status);
if (error_status & (DP_PSR_RFB_STORAGE_ERROR |
DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR |
@@ -3193,11 +3313,11 @@ static int i915_psr_sink_status_show(struct seq_file *m, void *data)
else
seq_puts(m, "\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
- seq_puts(m, "\tPSR RFB storage error\n");
+ seq_printf(m, "\t%s RFB storage error\n", psr_mode_str(intel_dp));
if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
- seq_puts(m, "\tPSR VSC SDP uncorrectable error\n");
+ seq_printf(m, "\t%s VSC SDP uncorrectable error\n", psr_mode_str(intel_dp));
if (error_status & DP_PSR_LINK_CRC_ERROR)
- seq_puts(m, "\tPSR Link CRC error\n");
+ seq_printf(m, "\t%s Link CRC error\n", psr_mode_str(intel_dp));
return ret;
}
@@ -3217,13 +3337,16 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
- if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
+ /* TODO: Add support for MST connectors as well. */
+ if ((connector->base.connector_type != DRM_MODE_CONNECTOR_eDP &&
+ connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+ connector->mst_port)
return;
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
- if (HAS_PSR(i915))
+ if (HAS_PSR(i915) || HAS_DP20(i915))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
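The sink-status debugfs changes above select a string table based on the active mode and bound the lookup with ARRAY_SIZE(). A stand-alone sketch of that mask/shift-then-bounds-check pattern (the field layout and status value below are illustrative, not the DPCD definitions):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* illustrative field layout: frame-lock state in bits 3:2 of the status byte */
#define FRAME_LOCK_MASK  0x0c
#define FRAME_LOCK_SHIFT 2

static const char * const frame_lock_status[] = {
        "locked to source",
        "coasting on VTotal target",
        "governing the frame rate",
        "re-locking with source",
};

int main(void)
{
        unsigned int status = 0x08;   /* pretend DPCD readback */
        unsigned int idx = (status & FRAME_LOCK_MASK) >> FRAME_LOCK_SHIFT;
        const char *str = "unknown";

        if (idx < ARRAY_SIZE(frame_lock_status))
                str = frame_lock_status[idx];

        printf("sink status: 0x%x [%s]\n", status, str);
        return 0;
}

Defaulting str to "unknown" before the bounds check mirrors how i915_psr_sink_status_show() copes with reserved or future status encodings.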
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index bf35f42df6..143e0595c0 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -21,6 +21,13 @@ struct intel_encoder;
struct intel_plane;
struct intel_plane_state;
+#define CAN_PSR(intel_dp) ((intel_dp)->psr.sink_support && \
+ (intel_dp)->psr.source_support)
+
+#define CAN_PANEL_REPLAY(intel_dp) ((intel_dp)->psr.sink_panel_replay_support && \
+ (intel_dp)->psr.source_panel_replay_support)
+
+bool intel_encoder_can_psr(struct intel_encoder *encoder);
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
@@ -48,16 +55,6 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state);
-void intel_psr2_program_plane_sel_fetch_noarm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state,
- int color_plane);
-void intel_psr2_program_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-
-void intel_psr2_disable_plane_sel_fetch_arm(struct intel_plane *plane,
- const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
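The intel_psr.c changes above tie two constraints together: the sink's PSR setup time has to fit in vblank (with DISPLAY_VER >= 20 allowed to cover an overrun with extra entry setup frames), and the "frames before SU entry" value must stay at least one frame above those setup frames. A simplified, illustrative sketch of that arithmetic (the mode numbers and the scanline conversion are made up for the example, not taken from the driver):

#include <stdio.h>

struct mode {
        int clock_khz;   /* pixel clock */
        int htotal;
        int vtotal;
        int vdisplay;
};

/* rough stand-in for intel_usecs_to_scanlines(): usecs / line time, rounded up */
static int usecs_to_scanlines(const struct mode *m, int usecs)
{
        long long num = (long long)usecs * m->clock_khz;
        long long den = (long long)m->htotal * 1000;

        return (int)((num + den - 1) / den);
}

int main(void)
{
        struct mode m = { .clock_khz = 533250, .htotal = 4000, .vtotal = 2200, .vdisplay = 2160 };
        int psr_setup_time_us = 330;   /* example sink value */
        int sink_sync_latency = 1;
        int vblank_lines = m.vtotal - m.vdisplay - 1;
        int setup_lines = usecs_to_scanlines(&m, psr_setup_time_us);
        int entry_setup_frames = 0;
        int frames_before_su;

        if (setup_lines > vblank_lines)
                entry_setup_frames = 1;   /* only acceptable on LNL+; older hardware rejects PSR here */

        frames_before_su = sink_sync_latency + 1 > 2 ? sink_sync_latency + 1 : 2;
        if (entry_setup_frames >= frames_before_su)
                frames_before_su = entry_setup_frames + 1;

        printf("setup %d lines vs vblank %d lines -> entry_setup_frames=%d, frames_before_su=%d\n",
               setup_lines, vblank_lines, entry_setup_frames, frames_before_su);
        return 0;
}

With these made-up numbers the 330 us setup time needs 44 lines but only 39 lines of vblank are available, so the sketch falls back to one entry setup frame while keeping two frames before SU entry, mirroring intel_psr_entry_setup_frames() and frames_before_su_entry().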
diff --git a/drivers/gpu/drm/i915/display/intel_psr_regs.h b/drivers/gpu/drm/i915/display/intel_psr_regs.h
index d39951383c..efe4306b37 100644
--- a/drivers/gpu/drm/i915/display/intel_psr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_psr_regs.h
@@ -35,6 +35,8 @@
#define EDP_PSR_MIN_LINK_ENTRY_TIME_0_LINES REG_FIELD_PREP(EDP_PSR_MIN_LINK_ENTRY_TIME_MASK, 3)
#define EDP_PSR_MAX_SLEEP_TIME_MASK REG_GENMASK(24, 20)
#define EDP_PSR_MAX_SLEEP_TIME(x) REG_FIELD_PREP(EDP_PSR_MAX_SLEEP_TIME_MASK, (x))
+#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK REG_GENMASK(17, 16)
+#define LNL_EDP_PSR_ENTRY_SETUP_FRAMES(x) REG_FIELD_PREP(LNL_EDP_PSR_ENTRY_SETUP_FRAMES_MASK, (x))
#define EDP_PSR_SKIP_AUX_EXIT REG_BIT(12)
#define EDP_PSR_TP_MASK REG_BIT(11)
#define EDP_PSR_TP_TP1_TP2 REG_FIELD_PREP(EDP_PSR_TP_MASK, 0)
diff --git a/drivers/gpu/drm/i915/display/intel_qp_tables.c b/drivers/gpu/drm/i915/display/intel_qp_tables.c
index 543cdc46aa..600c815e37 100644
--- a/drivers/gpu/drm/i915/display/intel_qp_tables.c
+++ b/drivers/gpu/drm/i915/display/intel_qp_tables.c
@@ -34,9 +34,6 @@
* These qp tables are as per the C model
* and it has the rows pointing to bpps which increment
* in steps of 0.5
- * We do not support fractional bpps as of today,
- * hence we would skip the fractional bpps during
- * our references for qp calclulations.
*/
static const u8 rc_range_minqp444_8bpc[DSC_NUM_BUF_RANGES][RC_RANGE_QP444_8BPC_MAX_NUM_BPP] = {
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 312f88d90a..cc978ee6d1 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -35,6 +35,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_eld.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -1787,17 +1788,28 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
intel_sdvo_get_eld(intel_sdvo, pipe_config);
}
-static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
+static void intel_sdvo_disable_audio(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state)
{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
+
+ if (!old_crtc_state->has_audio)
+ return;
+
intel_sdvo_set_audio_state(intel_sdvo, 0);
}
-static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
+static void intel_sdvo_enable_audio(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
const u8 *eld = crtc_state->eld;
+ if (!crtc_state->has_audio)
+ return;
+
intel_sdvo_set_audio_state(intel_sdvo, 0);
intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
@@ -1818,9 +1830,6 @@ static void intel_disable_sdvo(struct intel_atomic_state *state,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- if (old_crtc_state->has_audio)
- intel_sdvo_disable_audio(intel_sdvo);
-
intel_sdvo_set_active_outputs(intel_sdvo, 0);
if (0)
intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1912,9 +1921,6 @@ static void intel_enable_sdvo(struct intel_atomic_state *state,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_ON);
intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo_connector->output_flag);
-
- if (pipe_config->has_audio)
- intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
}
static enum drm_mode_status
@@ -3317,7 +3323,6 @@ intel_sdvo_init_ddc_proxy(struct intel_sdvo_ddc *ddc,
ddc->ddc_bus = ddc_bus;
ddc->ddc.owner = THIS_MODULE;
- ddc->ddc.class = I2C_CLASS_DDC;
snprintf(ddc->ddc.name, I2C_NAME_SIZE, "SDVO %c DDC%d",
port_name(sdvo->base.port), ddc_bus);
ddc->ddc.dev.parent = &pdev->dev;
@@ -3396,6 +3401,8 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
}
intel_encoder->pre_enable = intel_sdvo_pre_enable;
intel_encoder->enable = intel_enable_sdvo;
+ intel_encoder->audio_enable = intel_sdvo_enable_audio;
+ intel_encoder->audio_disable = intel_sdvo_disable_audio;
intel_encoder->get_hw_state = intel_sdvo_get_hw_state;
intel_encoder->get_config = intel_sdvo_get_config;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index ce5a73a4cc..bc61e736f9 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -3,7 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
-#include <linux/util_macros.h>
+#include <linux/math.h>
#include "i915_reg.h"
#include "intel_ddi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index 1fb16510f7..d7b440c8ca 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -48,6 +48,11 @@
#include "intel_frontbuffer.h"
#include "intel_sprite.h"
+static char sprite_name(struct drm_i915_private *i915, enum pipe pipe, int sprite)
+{
+ return pipe * DISPLAY_RUNTIME_INFO(i915)->num_sprites[pipe] + sprite + 'A';
+}
+
static void i9xx_plane_linear_gamma(u16 gamma[8])
{
/* The points are not evenly spaced. */
@@ -1636,7 +1641,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
- "sprite %c", sprite_name(pipe, sprite));
+ "sprite %c", sprite_name(dev_priv, pipe, sprite));
kfree(modifiers);
if (ret)
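The new sprite_name() helper above numbers sprite planes consecutively across pipes before converting to a letter. A quick stand-alone check of that mapping, assuming a fixed two-sprites-per-pipe layout (in the driver the per-pipe count comes from DISPLAY_RUNTIME_INFO()):

#include <stdio.h>

/* same arithmetic as the helper above, with the sprite count passed in */
static char sprite_name(int pipe, int num_sprites_per_pipe, int sprite)
{
        return pipe * num_sprites_per_pipe + sprite + 'A';
}

int main(void)
{
        for (int pipe = 0; pipe < 3; pipe++)
                for (int sprite = 0; sprite < 2; sprite++)
                        printf("pipe %c sprite %d -> '%c'\n",
                               'A' + pipe, sprite, sprite_name(pipe, 2, sprite));
        return 0;
}

So pipe A owns sprites A/B, pipe B owns C/D, and so on, matching the "sprite %c" plane names registered in intel_sprite_plane_create().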
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index f64d348a96..dcf05e00e5 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -1030,18 +1030,25 @@ static bool xelpdp_tc_phy_enable_tcss_power(struct intel_tc_port *tc, bool enabl
__xelpdp_tc_phy_enable_tcss_power(tc, enable);
- if ((!tc_phy_wait_for_ready(tc) ||
- !xelpdp_tc_phy_wait_for_tcss_power(tc, enable)) &&
- !drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY)) {
- if (enable) {
- __xelpdp_tc_phy_enable_tcss_power(tc, false);
- xelpdp_tc_phy_wait_for_tcss_power(tc, false);
- }
+ if (enable && !tc_phy_wait_for_ready(tc))
+ goto out_disable;
- return false;
- }
+ if (!xelpdp_tc_phy_wait_for_tcss_power(tc, enable))
+ goto out_disable;
return true;
+
+out_disable:
+ if (drm_WARN_ON(&i915->drm, tc->mode == TC_PORT_LEGACY))
+ return false;
+
+ if (!enable)
+ return false;
+
+ __xelpdp_tc_phy_enable_tcss_power(tc, false);
+ xelpdp_tc_phy_wait_for_tcss_power(tc, false);
+
+ return false;
}
static void xelpdp_tc_phy_take_ownership(struct intel_tc_port *tc, bool take)
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index f790fd10ba..992a725de7 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1417,9 +1417,6 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv,
static void set_color_conversion(struct drm_i915_private *dev_priv,
const struct color_conversion *color_conversion)
{
- if (!color_conversion)
- return;
-
intel_de_write(dev_priv, TV_CSC_Y,
(color_conversion->ry << 16) | color_conversion->gy);
intel_de_write(dev_priv, TV_CSC_Y2,
@@ -1454,9 +1451,6 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
int xpos, ypos;
unsigned int xsize, ysize;
- if (!tv_mode)
- return; /* can't happen (mode_prepare prevents this) */
-
tv_ctl = intel_de_read(dev_priv, TV_CTL);
tv_ctl &= TV_CTL_SAVE;
diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c
index 2cec2abf97..fe256bf7b4 100644
--- a/drivers/gpu/drm/i915/display/intel_vblank.c
+++ b/drivers/gpu/drm/i915/display/intel_vblank.c
@@ -265,6 +265,32 @@ int intel_crtc_scanline_to_hw(struct intel_crtc *crtc, int scanline)
return (scanline + vtotal - crtc->scanline_offset) % vtotal;
}
+/*
+ * The uncore version of the spin lock functions is used to decide
+ * whether we need to lock the uncore lock or not. This is only
+ * needed in i915, not in Xe.
+ *
+ * This lock in i915 is needed because some old platforms (at least
+ * IVB and possibly HSW as well), which are not supported in Xe, need
+ * all register accesses to the same cacheline to be serialized,
+ * otherwise they may hang.
+ */
+static void intel_vblank_section_enter(struct drm_i915_private *i915)
+ __acquires(i915->uncore.lock)
+{
+#ifdef I915
+ spin_lock(&i915->uncore.lock);
+#endif
+}
+
+static void intel_vblank_section_exit(struct drm_i915_private *i915)
+ __releases(i915->uncore.lock)
+{
+#ifdef I915
+ spin_unlock(&i915->uncore.lock);
+#endif
+}
+
static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
bool in_vblank_irq,
int *vpos, int *hpos,
@@ -302,11 +328,12 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
}
/*
- * Lock uncore.lock, as we will do multiple timing critical raw
- * register reads, potentially with preemption disabled, so the
- * following code must not block on uncore.lock.
+ * Enter vblank critical section, as we will do multiple
+ * timing critical raw register reads, potentially with
+ * preemption disabled, so the following code must not block.
*/
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ local_irq_save(irqflags);
+ intel_vblank_section_enter(dev_priv);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -374,7 +401,8 @@ static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+ intel_vblank_section_exit(dev_priv);
+ local_irq_restore(irqflags);
/*
* While in vblank, position will be negative
@@ -412,9 +440,13 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc)
unsigned long irqflags;
int position;
- spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+ local_irq_save(irqflags);
+ intel_vblank_section_enter(dev_priv);
+
position = __intel_get_crtc_scanline(crtc);
- spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+ intel_vblank_section_exit(dev_priv);
+ local_irq_restore(irqflags);
return position;
}
@@ -537,7 +569,7 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
* Need to audit everything to make sure it's safe.
*/
spin_lock_irqsave(&i915->drm.vblank_time_lock, irqflags);
- spin_lock(&i915->uncore.lock);
+ intel_vblank_section_enter(i915);
drm_calc_timestamping_constants(&crtc->base, &adjusted_mode);
@@ -546,7 +578,6 @@ void intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state,
crtc->mode_flags = mode_flags;
crtc->scanline_offset = intel_crtc_scanline_offset(crtc_state);
-
- spin_unlock(&i915->uncore.lock);
+ intel_vblank_section_exit(i915);
spin_unlock_irqrestore(&i915->drm.vblank_time_lock, irqflags);
}
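The vblank rework above splits spin_lock_irqsave() into an explicit local_irq_save() plus an uncore-lock section that is compiled in only for the i915 build, so Xe can run the same code without the lock. A rough user-space sketch of that conditionally-compiled critical-section pattern (the macro name and mutex are stand-ins, not the real build plumbing):

#include <pthread.h>
#include <stdio.h>

#define NEEDS_SERIALIZED_MMIO 1   /* stand-in for the "#ifdef I915" build switch */

static pthread_mutex_t mmio_lock = PTHREAD_MUTEX_INITIALIZER;

static void vblank_section_enter(void)
{
#if NEEDS_SERIALIZED_MMIO
        /* i915 only: serialize register accesses that hit the same cacheline */
        pthread_mutex_lock(&mmio_lock);
#endif
}

static void vblank_section_exit(void)
{
#if NEEDS_SERIALIZED_MMIO
        pthread_mutex_unlock(&mmio_lock);
#endif
}

int main(void)
{
        /* interrupt masking stays with the caller, as local_irq_save() does in the patch */
        vblank_section_enter();
        printf("timing-critical register reads go here\n");
        vblank_section_exit();
        return 0;
}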
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 6757dbae9e..17d6572f9d 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -77,8 +77,8 @@ intel_vdsc_set_min_max_qp(struct drm_dsc_config *vdsc_cfg, int buf,
static void
calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
{
+ int bpp = to_bpp_int(vdsc_cfg->bits_per_pixel);
int bpc = vdsc_cfg->bits_per_component;
- int bpp = vdsc_cfg->bits_per_pixel >> 4;
int qp_bpc_modifier = (bpc - 8) * 2;
int uncompressed_bpg_rate;
int first_line_bpg_offset;
@@ -148,7 +148,13 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
static const s8 ofs_und8[] = {
10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
};
-
+ /*
+ * For the 420 format, since bits_per_pixel (bpp) is set to target bpp * 2,
+ * QP table values for target bpp 4.0 to 4.4375 (rounded to 4.0) are
+ * actually for bpp 8 to 8.875 (rounded to 4.0 * 2, i.e. 8).
+ * Similarly, values for target bpp 4.5 to 4.9375 (rounded to 4.5)
+ * are for bpp 9 to 9.875 (rounded to 4.5 * 2, i.e. 9), and so on.
+ */
bpp_i = bpp - 8;
for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
u8 range_bpg_offset;
@@ -178,6 +184,9 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
range_bpg_offset & DSC_RANGE_BPG_OFFSET_MASK;
}
} else {
+ /* fractional bpp part * 10000 (for precision up to 4 decimal places) */
+ int fractional_bits = to_bpp_frac(vdsc_cfg->bits_per_pixel);
+
static const s8 ofs_und6[] = {
0, -2, -2, -4, -6, -6, -8, -8, -8, -10, -10, -12, -12, -12, -12
};
@@ -191,7 +200,14 @@ calculate_rc_params(struct drm_dsc_config *vdsc_cfg)
10, 8, 6, 4, 2, 0, -2, -4, -6, -8, -10, -10, -12, -12, -12
};
- bpp_i = (2 * (bpp - 6));
+ /*
+ * QP table rows have values in increments of 0.5.
+ * So 6.0 bpp to 6.4375 will have index 0, 6.5 to 6.9375 will have index 1,
+ * and so on.
+ * A 0.5 fractional part with 4 decimal places of precision becomes 5000.
+ */
+ bpp_i = 2 * (bpp - 6) + (fractional_bits < 5000 ? 0 : 1);
+
for (buf_i = 0; buf_i < DSC_NUM_BUF_RANGES; buf_i++) {
u8 range_bpg_offset;
@@ -248,7 +264,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config;
- u16 compressed_bpp = pipe_config->dsc.compressed_bpp;
+ u16 compressed_bpp = to_bpp_int(pipe_config->dsc.compressed_bpp_x16);
int err;
int ret;
@@ -279,8 +295,7 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
/* Gen 11 does not support VBR */
vdsc_cfg->vbr_enable = false;
- /* Gen 11 only supports integral values of bpp */
- vdsc_cfg->bits_per_pixel = compressed_bpp << 4;
+ vdsc_cfg->bits_per_pixel = pipe_config->dsc.compressed_bpp_x16;
/*
* According to DSC 1.2 specs in Section 4.1 if native_420 is set
@@ -797,13 +812,13 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
}
static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
- bool *check_equal)
+ bool *all_equal)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
i915_reg_t dsc_reg[2];
int i, vdsc_per_pipe, dsc_reg_num;
- u32 val = 0;
+ u32 val;
vdsc_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
dsc_reg_num = min_t(int, ARRAY_SIZE(dsc_reg), vdsc_per_pipe);
@@ -812,20 +827,13 @@ static u32 intel_dsc_pps_read(struct intel_crtc_state *crtc_state, int pps,
intel_dsc_get_pps_reg(crtc_state, pps, dsc_reg, dsc_reg_num);
- if (check_equal)
- *check_equal = true;
-
- for (i = 0; i < dsc_reg_num; i++) {
- u32 tmp;
+ *all_equal = true;
- tmp = intel_de_read(i915, dsc_reg[i]);
+ val = intel_de_read(i915, dsc_reg[0]);
- if (i == 0) {
- val = tmp;
- } else if (check_equal && tmp != val) {
- *check_equal = false;
- break;
- } else if (!check_equal) {
+ for (i = 1; i < dsc_reg_num; i++) {
+ if (intel_de_read(i915, dsc_reg[i]) != val) {
+ *all_equal = false;
break;
}
}
@@ -874,7 +882,7 @@ static void intel_dsc_get_pps_config(struct intel_crtc_state *crtc_state)
if (vdsc_cfg->native_420)
vdsc_cfg->bits_per_pixel >>= 1;
- crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
+ crtc_state->dsc.compressed_bpp_x16 = vdsc_cfg->bits_per_pixel;
/* PPS 2 */
pps_temp = intel_dsc_pps_read_and_verify(crtc_state, 2);
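The bpp_i computation above indexes QP tables whose rows step in 0.5 bpp starting at 6.0, with the fractional part (scaled to 1/10000 units per the patch's comment) deciding whether to round up to the .5 row. A small worked example of that indexing, assuming the same 1/16-bpp fixed point used elsewhere in this series (the helper below is local, not the driver's):

#include <stdio.h>

/* index into a QP table whose rows cover 6.0, 6.5, 7.0, ... bpp */
static int qp_row_index(int bpp_x16)
{
        int bpp_int  = bpp_x16 >> 4;                  /* integer part */
        int frac_1e4 = (bpp_x16 & 0xf) * 10000 / 16;  /* fractional part * 10000 */

        return 2 * (bpp_int - 6) + (frac_1e4 < 5000 ? 0 : 1);
}

int main(void)
{
        int samples_x16[] = { 96, 103, 104, 127, 128 };   /* 6.0, 6.4375, 6.5, 7.9375, 8.0 bpp */

        for (unsigned int i = 0; i < sizeof(samples_x16) / sizeof(samples_x16[0]); i++) {
                int v = samples_x16[i];

                printf("bpp %d.%04d -> row %d\n",
                       v >> 4, (v & 0xf) * 10000 / 16, qp_row_index(v));
        }
        return 0;
}

This keeps 6.0-6.4375 on row 0 and 6.5-6.9375 on row 1, exactly as the comment in the calculate_rc_params() hunk describes.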
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index eb5bd07439..f542ee1db1 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -117,6 +117,13 @@ intel_vrr_compute_config(struct intel_crtc_state *crtc_state,
const struct drm_display_info *info = &connector->base.display_info;
int vmin, vmax;
+ /*
+ * FIXME all joined pipes share the same transcoder.
+ * Need to account for that during VRR toggle/push/etc.
+ */
+ if (crtc_state->bigjoiner_pipes)
+ return;
+
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return;
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 245a64332c..8bba6c2e50 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -18,10 +18,10 @@
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
#include "intel_psr.h"
+#include "intel_psr_regs.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
#include "skl_watermark.h"
-#include "gt/intel_gt.h"
#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
@@ -630,6 +630,18 @@ skl_plane_disable_arm(struct intel_plane *plane,
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
}
+static void icl_plane_disable_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
+}
+
static void
icl_plane_disable_arm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state)
@@ -643,7 +655,7 @@ icl_plane_disable_arm(struct intel_plane *plane,
skl_write_plane_wm(plane, crtc_state);
- intel_psr2_disable_plane_sel_fetch_arm(plane, crtc_state);
+ icl_plane_disable_sel_fetch_arm(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
}
@@ -1007,7 +1019,8 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
* The DPT object contains only one vma, so the VMA's offset
* within the DPT is always 0.
*/
- drm_WARN_ON(&i915->drm, plane_state->dpt_vma->node.start);
+ drm_WARN_ON(&i915->drm, plane_state->dpt_vma &&
+ plane_state->dpt_vma->node.start);
drm_WARN_ON(&i915->drm, offset & 0x1fffff);
return offset >> 9;
} else {
@@ -1197,6 +1210,48 @@ skl_plane_update_arm(struct intel_plane *plane,
skl_plane_surf(plane_state, 0));
}
+static void icl_plane_update_sel_fetch_noarm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state,
+ int color_plane)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+ const struct drm_rect *clip;
+ u32 val;
+ int x, y;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ clip = &plane_state->psr2_sel_fetch_area;
+
+ val = (clip->y1 + plane_state->uapi.dst.y1) << 16;
+ val |= plane_state->uapi.dst.x1;
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_POS(pipe, plane->id), val);
+
+ x = plane_state->view.color_plane[color_plane].x;
+
+ /*
+ * From Bspec: UV surface Start Y Position = half of Y plane Y
+ * start position.
+ */
+ if (!color_plane)
+ y = plane_state->view.color_plane[color_plane].y + clip->y1;
+ else
+ y = plane_state->view.color_plane[color_plane].y + clip->y1 / 2;
+
+ val = y << 16 | x;
+
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
+ val);
+
+ /* Sizes are 0 based */
+ val = (drm_rect_height(clip) - 1) << 16;
+ val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
+}
+
static void
icl_plane_update_noarm(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1269,7 +1324,24 @@ icl_plane_update_noarm(struct intel_plane *plane,
if (plane_state->force_black)
icl_plane_csc_load_black(plane);
- intel_psr2_program_plane_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
+ icl_plane_update_sel_fetch_noarm(plane, crtc_state, plane_state, color_plane);
+}
+
+static void icl_plane_update_sel_fetch_arm(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ if (drm_rect_height(&plane_state->psr2_sel_fetch_area) > 0)
+ intel_de_write_fw(i915, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ PLANE_SEL_FETCH_CTL_ENABLE);
+ else
+ icl_plane_disable_sel_fetch_arm(plane, crtc_state);
}
static void
@@ -1296,7 +1368,7 @@ icl_plane_update_arm(struct intel_plane *plane,
if (plane_state->scaler_id >= 0)
skl_program_plane_scaler(plane, crtc_state, plane_state);
- intel_psr2_program_plane_sel_fetch_arm(plane, crtc_state, plane_state);
+ icl_plane_update_sel_fetch_arm(plane, crtc_state, plane_state);
/*
* The control register self-arms if the plane was previously
@@ -1855,16 +1927,19 @@ static bool skl_fb_scalable(const struct drm_framebuffer *fb)
}
}
-static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
+static void check_protection(struct intel_plane_state *plane_state)
{
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- return intel_pxp_key_check(i915->pxp, obj, false) == 0;
-}
+ if (DISPLAY_VER(i915) < 11)
+ return;
-static bool pxp_is_borked(struct drm_i915_gem_object *obj)
-{
- return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
+ plane_state->decrypt = intel_pxp_key_check(i915->pxp, obj, false) == 0;
+ plane_state->force_black = i915_gem_object_is_protected(obj) &&
+ !plane_state->decrypt;
}
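The consolidated check_protection() above derives both flags from a single PXP key check: the plane may scan out decrypted content only when the key check succeeds, and a protected object whose key check fails is forced to black. A tiny truth-table sketch, where the key-check result is just a stand-in integer for illustration:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical inputs: is the object protected, and did the
	 * key check return 0 (success)? */
	bool protected_obj[] = { false, true, true };
	int  key_check_ret[] = { 0,     0,    -1   };

	for (int i = 0; i < 3; i++) {
		bool decrypt = (key_check_ret[i] == 0);
		bool force_black = protected_obj[i] && !decrypt;

		printf("protected=%d key_check=%d -> decrypt=%d force_black=%d\n",
		       protected_obj[i], key_check_ret[i], decrypt, force_black);
	}
	return 0;
}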
static int skl_plane_check(struct intel_crtc_state *crtc_state,
@@ -1911,10 +1986,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state,
if (ret)
return ret;
- if (DISPLAY_VER(dev_priv) >= 11) {
- plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
- plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
- }
+ check_protection(plane_state);
/* HW only has 8 bits pixel precision, disable plane if invisible */
if (!(plane_state->hw.alpha >> 8))
@@ -2218,6 +2290,9 @@ static u8 skl_get_plane_caps(struct drm_i915_private *i915,
if (HAS_4TILE(i915))
caps |= INTEL_PLANE_CAP_TILING_4;
+ if (!IS_ENABLED(I915) && !HAS_FLAT_CCS(i915))
+ return caps;
+
if (skl_plane_has_rc_ccs(i915, pipe, plane_id)) {
caps |= INTEL_PLANE_CAP_CCS_RC;
if (DISPLAY_VER(i915) >= 12)
@@ -2489,7 +2564,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
goto error;
}
- if (!dev_priv->params.enable_dpt &&
+ if (!dev_priv->display.params.enable_dpt &&
intel_fb_modifier_uses_dpt(dev_priv, fb->modifier)) {
drm_dbg_kms(&dev_priv->drm, "DPT disabled, skipping initial FB\n");
goto error;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 99b8ccdc3d..56588d6e24 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -412,7 +412,7 @@ static bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->params.enable_sagv)
+ if (!i915->display.params.enable_sagv)
return false;
if (DISPLAY_VER(i915) >= 12)
@@ -3702,7 +3702,8 @@ static int intel_sagv_status_show(struct seq_file *m, void *unused)
};
seq_printf(m, "SAGV available: %s\n", str_yes_no(intel_has_sagv(i915)));
- seq_printf(m, "SAGV modparam: %s\n", str_enabled_disabled(i915->params.enable_sagv));
+ seq_printf(m, "SAGV modparam: %s\n",
+ str_enabled_disabled(i915->display.params.enable_sagv));
seq_printf(m, "SAGV status: %s\n", sagv_status[i915->display.sagv.status]);
seq_printf(m, "SAGV block time: %d usec\n", i915->display.sagv.block_time_us);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index f488394d31..9b33b8a74d 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -561,6 +561,12 @@ static void glk_dsi_clear_device_ready(struct intel_encoder *encoder)
glk_dsi_disable_mipi_io(encoder);
}
+static i915_reg_t port_ctrl_reg(struct drm_i915_private *i915, enum port port)
+{
+ return IS_GEMINILAKE(i915) || IS_BROXTON(i915) ?
+ BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+}
+
static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -570,7 +576,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
drm_dbg_kms(&dev_priv->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
- i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
+ i915_reg_t port_ctrl = IS_BROXTON(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
@@ -589,7 +595,7 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
* On VLV/CHV, wait till Clock lanes are in LP-00 state for MIPI
* Port A only. MIPI Port C has no similar bit for checking.
*/
- if ((IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) || port == PORT_A) &&
+ if ((IS_BROXTON(dev_priv) || port == PORT_A) &&
intel_de_wait_for_clear(dev_priv, port_ctrl,
AFE_LATCHOUT, 30))
drm_err(&dev_priv->drm, "DSI LP not going Low\n");
@@ -627,8 +633,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
u32 temp;
temp = intel_de_read(dev_priv, port_ctrl);
@@ -664,8 +669,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t port_ctrl = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
+ i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
/* de-assert ip_tg_enable signal */
intel_de_rmw(dev_priv, port_ctrl, DPI_ENABLE, 0);
@@ -955,9 +959,8 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
- i915_reg_t ctrl_reg = IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ?
- BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE;
+ i915_reg_t port_ctrl = port_ctrl_reg(dev_priv, port);
+ bool enabled = intel_de_read(dev_priv, port_ctrl) & DPI_ENABLE;
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
@@ -1529,16 +1532,8 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
}
}
-static void intel_dsi_encoder_destroy(struct drm_encoder *encoder)
-{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(encoder));
-
- intel_dsi_vbt_gpio_cleanup(intel_dsi);
- intel_encoder_destroy(encoder);
-}
-
static const struct drm_encoder_funcs intel_dsi_funcs = {
- .destroy = intel_dsi_encoder_destroy,
+ .destroy = intel_encoder_destroy,
};
static enum drm_mode_status vlv_dsi_mode_valid(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index e38f06a6e5..dcbfe32fd3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -279,7 +279,8 @@ static int proto_context_set_protected(struct drm_i915_private *i915,
}
static struct i915_gem_proto_context *
-proto_context_create(struct drm_i915_private *i915, unsigned int flags)
+proto_context_create(struct drm_i915_file_private *fpriv,
+ struct drm_i915_private *i915, unsigned int flags)
{
struct i915_gem_proto_context *pc, *err;
@@ -287,6 +288,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags)
if (!pc)
return ERR_PTR(-ENOMEM);
+ pc->fpriv = fpriv;
pc->num_user_engines = -1;
pc->user_engines = NULL;
pc->user_flags = BIT(UCONTEXT_BANNABLE) |
@@ -1622,6 +1624,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
err = PTR_ERR(ppgtt);
goto err_ctx;
}
+ ppgtt->vm.fpriv = pc->fpriv;
vm = &ppgtt->vm;
}
if (vm)
@@ -1741,7 +1744,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
/* 0 reserved for invalid/unassigned ppgtt */
xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(file_priv, i915, 0);
if (IS_ERR(pc)) {
err = PTR_ERR(pc);
goto err;
@@ -1823,6 +1826,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
args->vm_id = id;
+ ppgtt->vm.fpriv = file_priv;
return 0;
err_put:
@@ -2285,7 +2289,8 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return -EIO;
}
- ext_data.pc = proto_context_create(i915, args->flags);
+ ext_data.pc = proto_context_create(file->driver_priv, i915,
+ args->flags);
if (IS_ERR(ext_data.pc))
return PTR_ERR(ext_data.pc);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index cb78214a7d..03bc7f9d19 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -188,6 +188,9 @@ struct i915_gem_proto_engine {
* CONTEXT_CREATE_SET_PARAM during GEM_CONTEXT_CREATE.
*/
struct i915_gem_proto_context {
+ /** @fpriv: Client which creates the context */
+ struct drm_i915_file_private *fpriv;
+
/** @vm: See &i915_gem_context.vm */
struct i915_address_space *vm;
@@ -409,9 +412,9 @@ struct i915_gem_context {
/** @stale: tracks stale engines to be destroyed */
struct {
- /** @lock: guards engines */
+ /** @stale.lock: guards engines */
spinlock_t lock;
- /** @engines: list of stale engines */
+ /** @stale.engines: list of stale engines */
struct list_head engines;
} stale;
};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 683fd8d315..555022c065 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -9,6 +9,7 @@
#include <linux/sync_file.h>
#include <linux/uaccess.h>
+#include <drm/drm_auth.h>
#include <drm/drm_syncobj.h>
#include "display/intel_frontbuffer.h"
@@ -253,6 +254,8 @@ struct i915_execbuffer {
struct intel_gt *gt; /* gt for the execbuf */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
+ intel_wakeref_t wakeref;
+ intel_wakeref_t wakeref_gt0;
/** our requests to build */
struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -1156,7 +1159,7 @@ static void reloc_cache_unmap(struct reloc_cache *cache)
vaddr = unmask_page(cache->vaddr);
if (cache->vaddr & KMAP)
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
else
io_mapping_unmap_atomic((void __iomem *)vaddr);
}
@@ -1172,7 +1175,7 @@ static void reloc_cache_remap(struct reloc_cache *cache,
if (cache->vaddr & KMAP) {
struct page *page = i915_gem_object_get_page(obj, cache->page);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
cache->vaddr = unmask_flags(cache->vaddr) |
(unsigned long)vaddr;
} else {
@@ -1202,7 +1205,7 @@ static void reloc_cache_reset(struct reloc_cache *cache, struct i915_execbuffer
if (cache->vaddr & CLFLUSH_AFTER)
mb();
- kunmap_atomic(vaddr);
+ kunmap_local(vaddr);
i915_gem_object_finish_access(obj);
} else {
struct i915_ggtt *ggtt = cache_to_ggtt(cache);
@@ -1234,7 +1237,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
struct page *page;
if (cache->vaddr) {
- kunmap_atomic(unmask_page(cache->vaddr));
+ kunmap_local(unmask_page(cache->vaddr));
} else {
unsigned int flushes;
int err;
@@ -1256,7 +1259,7 @@ static void *reloc_kmap(struct drm_i915_gem_object *obj,
if (!obj->mm.dirty)
set_page_dirty(page);
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
cache->vaddr = unmask_flags(cache->vaddr) | (unsigned long)vaddr;
cache->page = pageno;
@@ -1678,7 +1681,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb)
urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
size = nreloc * sizeof(*relocs);
- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
+ relocs = kvmalloc_array(1, size, GFP_KERNEL);
if (!relocs) {
err = -ENOMEM;
goto err;
@@ -2719,13 +2722,13 @@ eb_select_engine(struct i915_execbuffer *eb)
for_each_child(ce, child)
intel_context_get(child);
- intel_gt_pm_get(gt);
+ eb->wakeref = intel_gt_pm_get(ce->engine->gt);
/*
* Keep GT0 active on MTL so that i915_vma_parked() doesn't
* free VMAs while execbuf ioctl is validating VMAs.
*/
if (gt->info.id)
- intel_gt_pm_get(to_gt(gt->i915));
+ eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
err = intel_context_alloc_state(ce);
@@ -2765,9 +2768,9 @@ eb_select_engine(struct i915_execbuffer *eb)
err:
if (gt->info.id)
- intel_gt_pm_put(to_gt(gt->i915));
+ intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(ce->engine->gt, eb->wakeref);
for_each_child(ce, child)
intel_context_put(child);
intel_context_put(ce);
@@ -2785,8 +2788,8 @@ eb_put_engine(struct i915_execbuffer *eb)
* i915_vma_parked() from interfering while execbuf validates vmas.
*/
if (eb->gt->info.id)
- intel_gt_pm_put(to_gt(eb->gt->i915));
- intel_gt_pm_put(eb->gt);
+ intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
+ intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
for_each_child(eb->context, child)
intel_context_put(child);
intel_context_put(eb->context);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 6bc26b4b06..ea7561ae6e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -36,7 +36,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
struct sg_table *st;
struct scatterlist *sg;
unsigned int npages; /* restricted by sg_alloc_table */
- int max_order = MAX_ORDER;
+ int max_order = MAX_PAGE_ORDER;
unsigned int max_segment;
gfp_t gfp;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index c26d875558..58e6c680fe 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -106,6 +106,10 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->mm.link);
+#ifdef CONFIG_PROC_FS
+ INIT_LIST_HEAD(&obj->client_link);
+#endif
+
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->lut_lock);
@@ -293,6 +297,10 @@ void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ /* We need to keep this alive for RCU read access from fdinfo. */
+ if (obj->mm.n_placements > 1)
+ kfree(obj->mm.placements);
+
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -389,9 +397,6 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
if (obj->ops->release)
obj->ops->release(obj);
- if (obj->mm.n_placements > 1)
- kfree(obj->mm.placements);
-
if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
@@ -442,6 +447,8 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
GEM_BUG_ON(i915_gem_object_is_framebuffer(obj));
+ i915_drm_client_remove_object(obj);
+
/*
* Before we free the object, make sure any pure RCU-only
* read-side critical sections are complete, e.g.
@@ -493,17 +500,15 @@ static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
pgoff_t idx = offset >> PAGE_SHIFT;
- void *src_map;
void *src_ptr;
- src_map = kmap_atomic(i915_gem_object_get_page(obj, idx));
-
- src_ptr = src_map + offset_in_page(offset);
+ src_ptr = kmap_local_page(i915_gem_object_get_page(obj, idx))
+ + offset_in_page(offset);
if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
drm_clflush_virt_range(src_ptr, size);
memcpy(dst, src_ptr, size);
- kunmap_atomic(src_map);
+ kunmap_local(src_ptr);
}
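These kmap_atomic() to kmap_local_page() conversions also drop the separate page-start pointer, since kunmap_local() is documented to accept any address within the mapped page. A small stand-alone sketch of the pointer arithmetic involved; the addresses are made up:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	/* Pretend this is the mapping a page-mapping call returned. */
	uintptr_t base = 0x7f0000001000UL;
	uint64_t offset = 0x12345;                         /* arbitrary object offset */
	uintptr_t ptr = base + (offset & (PAGE_SIZE - 1)); /* offset_in_page() */

	/* An in-page address is enough to recover the page base: */
	printf("derived ptr   : %#lx\n", (unsigned long)ptr);
	printf("recovered base: %#lx\n", (unsigned long)(ptr & PAGE_MASK));
	return 0;
}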
static void
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 2292404007..0c5cdab278 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -302,6 +302,18 @@ struct drm_i915_gem_object {
*/
struct i915_address_space *shares_resv_from;
+#ifdef CONFIG_PROC_FS
+ /**
+ * @client: @i915_drm_client which created the object
+ */
+ struct i915_drm_client *client;
+
+ /**
+ * @client_link: Link into @i915_drm_client.objects_list
+ */
+ struct list_head client_link;
+#endif
+
union {
struct rcu_head rcu;
struct llist_node freed;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index 5df128e2f4..ef85c6dc9f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -65,16 +65,13 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
dst = vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
- void *src;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
goto err_st;
- src = kmap_atomic(page);
- memcpy(dst, src, PAGE_SIZE);
+ memcpy_from_page(dst, page, 0, PAGE_SIZE);
drm_clflush_virt_range(dst, PAGE_SIZE);
- kunmap_atomic(src);
put_page(page);
dst += PAGE_SIZE;
@@ -113,16 +110,13 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
- char *dst;
page = shmem_read_mapping_page(mapping, i);
if (IS_ERR(page))
continue;
- dst = kmap_atomic(page);
drm_clflush_virt_range(src, PAGE_SIZE);
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst);
+ memcpy_to_page(page, 0, src, PAGE_SIZE);
set_page_dirty(page);
if (obj->mm.madv == I915_MADV_WILLNEED)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 73a4a4eb29..38b72d8656 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -485,11 +485,13 @@ shmem_pwrite(struct drm_i915_gem_object *obj,
if (err < 0)
return err;
- vaddr = kmap_atomic(page);
+ vaddr = kmap_local_page(page);
+ pagefault_disable();
unwritten = __copy_from_user_inatomic(vaddr + pg,
user_data,
len);
- kunmap_atomic(vaddr);
+ pagefault_enable();
+ kunmap_local(vaddr);
err = aops->write_end(obj->base.filp, mapping, offset, len,
len - unwritten, page, data);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 1a766d8e7c..8c88075eea 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -386,6 +386,27 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+ /* Wa_14019821291 */
+ if (MEDIA_VER_FULL(i915) == IP_VER(13, 0)) {
+ /*
+ * This workaround is primarily implemented by the BIOS. We
+ * just need to figure out whether the BIOS has applied the
+ * workaround (meaning the programmed address falls within
+ * the DSM) and, if so, reserve that part of the DSM to
+ * prevent accidental reuse. The DSM location should be just
+ * below the WOPCM.
+ */
+ u64 gscpsmi_base = intel_uncore_read64_2x32(uncore,
+ MTL_GSCPSMI_BASEADDR_LSB,
+ MTL_GSCPSMI_BASEADDR_MSB);
+ if (gscpsmi_base >= i915->dsm.stolen.start &&
+ gscpsmi_base < i915->dsm.stolen.end) {
+ *base = gscpsmi_base;
+ *size = i915->dsm.stolen.end - gscpsmi_base;
+ return;
+ }
+ }
+
switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
case GEN8_STOLEN_RESERVED_1M:
*size = 1024 * 1024;
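The Wa_14019821291 branch above reserves everything from the BIOS-programmed GSCPSMI base up to the end of the data stolen memory (DSM) when that base falls inside the DSM. A minimal sketch of the range check and size computation, with made-up addresses standing in for the real DSM bounds (the end is treated as exclusive here):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical DSM range and BIOS-programmed GSCPSMI base. */
	uint64_t dsm_start = 0x80000000ULL;
	uint64_t dsm_end = 0x84000000ULL;      /* exclusive */
	uint64_t gscpsmi_base = 0x83f00000ULL;

	if (gscpsmi_base >= dsm_start && gscpsmi_base < dsm_end) {
		uint64_t rsvd_base = gscpsmi_base;
		uint64_t rsvd_size = dsm_end - gscpsmi_base;

		printf("reserved base=%#llx size=%#llx\n",
		       (unsigned long long)rsvd_base,
		       (unsigned long long)rsvd_size);
	}
	return 0;
}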
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 6b9f6cf50b..3ff3d8889c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -115,7 +115,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
do {
struct page *page;
- GEM_BUG_ON(order > MAX_ORDER);
+ GEM_BUG_ON(order > MAX_PAGE_ORDER);
page = alloc_pages(GFP | __GFP_ZERO, order);
if (!page)
goto err;
@@ -1082,7 +1082,7 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
goto err_unlock;
for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) {
- u32 *ptr = kmap_atomic(i915_gem_object_get_page(obj, n));
+ u32 *ptr = kmap_local_page(i915_gem_object_get_page(obj, n));
if (needs_flush & CLFLUSH_BEFORE)
drm_clflush_virt_range(ptr, PAGE_SIZE);
@@ -1090,12 +1090,12 @@ __cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val)
if (ptr[dword] != val) {
pr_err("n=%lu ptr[%u]=%u, val=%u\n",
n, dword, ptr[dword], val);
- kunmap_atomic(ptr);
+ kunmap_local(ptr);
err = -EINVAL;
break;
}
- kunmap_atomic(ptr);
+ kunmap_local(ptr);
}
i915_gem_object_finish_access(obj);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 3bef1beec7..2a0c0634d4 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -24,7 +24,6 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
{
unsigned int needs_clflush;
struct page *page;
- void *map;
u32 *cpu;
int err;
@@ -34,8 +33,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
goto out;
page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
- map = kmap_atomic(page);
- cpu = map + offset_in_page(offset);
+ cpu = kmap_local_page(page) + offset_in_page(offset);
if (needs_clflush & CLFLUSH_BEFORE)
drm_clflush_virt_range(cpu, sizeof(*cpu));
@@ -45,7 +43,7 @@ static int cpu_set(struct context *ctx, unsigned long offset, u32 v)
if (needs_clflush & CLFLUSH_AFTER)
drm_clflush_virt_range(cpu, sizeof(*cpu));
- kunmap_atomic(map);
+ kunmap_local(cpu);
i915_gem_object_finish_access(ctx->obj);
out:
@@ -57,7 +55,6 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
{
unsigned int needs_clflush;
struct page *page;
- void *map;
u32 *cpu;
int err;
@@ -67,15 +64,14 @@ static int cpu_get(struct context *ctx, unsigned long offset, u32 *v)
goto out;
page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT);
- map = kmap_atomic(page);
- cpu = map + offset_in_page(offset);
+ cpu = kmap_local_page(page) + offset_in_page(offset);
if (needs_clflush & CLFLUSH_BEFORE)
drm_clflush_virt_range(cpu, sizeof(*cpu));
*v = *cpu;
- kunmap_atomic(map);
+ kunmap_local(cpu);
i915_gem_object_finish_access(ctx->obj);
out:
@@ -85,6 +81,7 @@ out:
static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
u32 __iomem *map;
int err = 0;
@@ -99,7 +96,7 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -112,12 +109,13 @@ static int gtt_set(struct context *ctx, unsigned long offset, u32 v)
i915_vma_unpin_iomap(vma);
out_rpm:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
u32 __iomem *map;
int err = 0;
@@ -132,7 +130,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
@@ -145,7 +143,7 @@ static int gtt_get(struct context *ctx, unsigned long offset, u32 *v)
i915_vma_unpin_iomap(vma);
out_rpm:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7021b6e9b2..89d4dc8b60 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -489,12 +489,12 @@ static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
for (n = 0; n < real_page_count(obj); n++) {
u32 *map;
- map = kmap_atomic(i915_gem_object_get_page(obj, n));
+ map = kmap_local_page(i915_gem_object_get_page(obj, n));
for (m = 0; m < DW_PER_PAGE; m++)
map[m] = value;
if (!has_llc)
drm_clflush_virt_range(map, PAGE_SIZE);
- kunmap_atomic(map);
+ kunmap_local(map);
}
i915_gem_object_finish_access(obj);
@@ -520,7 +520,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
for (n = 0; n < real_page_count(obj); n++) {
u32 *map, m;
- map = kmap_atomic(i915_gem_object_get_page(obj, n));
+ map = kmap_local_page(i915_gem_object_get_page(obj, n));
if (needs_flush & CLFLUSH_BEFORE)
drm_clflush_virt_range(map, PAGE_SIZE);
@@ -546,7 +546,7 @@ static noinline int cpu_check(struct drm_i915_gem_object *obj,
}
out_unmap:
- kunmap_atomic(map);
+ kunmap_local(map);
if (err)
break;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
index e57f939007..d684a70f2c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_dmabuf.c
@@ -504,7 +504,7 @@ static int igt_dmabuf_export_vmap(void *arg)
}
if (memchr_inv(ptr, 0, dmabuf->size)) {
- pr_err("Exported object not initialiased to zero!\n");
+ pr_err("Exported object not initialised to zero!\n");
err = -EINVAL;
goto out;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 72957a36a3..2c51a2c452 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -630,14 +630,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
static void disable_retire_worker(struct drm_i915_private *i915)
{
i915_gem_driver_unregister__shrinker(i915);
- intel_gt_pm_get(to_gt(i915));
+ intel_gt_pm_get_untracked(to_gt(i915));
cancel_delayed_work_sync(&to_gt(i915)->requests.retire_work);
}
static void restore_retire_worker(struct drm_i915_private *i915)
{
igt_flush_test(i915);
- intel_gt_pm_put(to_gt(i915));
+ intel_gt_pm_put_untracked(to_gt(i915));
i915_gem_driver_register__shrinker(i915);
}
@@ -778,6 +778,7 @@ err_obj:
static int gtt_set(struct drm_i915_gem_object *obj)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *map;
int err = 0;
@@ -786,7 +787,7 @@ static int gtt_set(struct drm_i915_gem_object *obj)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
@@ -798,12 +799,13 @@ static int gtt_set(struct drm_i915_gem_object *obj)
i915_vma_unpin_iomap(vma);
out:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
static int gtt_check(struct drm_i915_gem_object *obj)
{
+ intel_wakeref_t wakeref;
struct i915_vma *vma;
void __iomem *map;
int err = 0;
@@ -812,7 +814,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
if (IS_ERR(vma))
return PTR_ERR(vma);
- intel_gt_pm_get(vma->vm->gt);
+ wakeref = intel_gt_pm_get(vma->vm->gt);
map = i915_vma_pin_iomap(vma);
i915_vma_unpin(vma);
if (IS_ERR(map)) {
@@ -828,7 +830,7 @@ static int gtt_check(struct drm_i915_gem_object *obj)
i915_vma_unpin_iomap(vma);
out:
- intel_gt_pm_put(vma->vm->gt);
+ intel_gt_pm_put(vma->vm->gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index e199d7dbb8..2b0327cc47 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -83,7 +83,7 @@ live_context(struct drm_i915_private *i915, struct file *file)
int err;
u32 id;
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(fpriv, i915, 0);
if (IS_ERR(pc))
return ERR_CAST(pc);
@@ -152,7 +152,7 @@ kernel_context(struct drm_i915_private *i915,
struct i915_gem_context *ctx;
struct i915_gem_proto_context *pc;
- pc = proto_context_create(i915, 0);
+ pc = proto_context_create(NULL, i915, 0);
if (IS_ERR(pc))
return ERR_CAST(pc);
diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
index 86a04afff6..e1bf13e3d3 100644
--- a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c
@@ -226,7 +226,7 @@ u32 *gen12_emit_aux_table_inv(struct intel_engine_cs *engine, u32 *cs)
static int mtl_dummy_pipe_control(struct i915_request *rq)
{
/* Wa_14016712196 */
- if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 71)) ||
+ if (IS_GFX_GT_IP_RANGE(rq->engine->gt, IP_VER(12, 70), IP_VER(12, 74)) ||
IS_DG2(rq->i915)) {
u32 *cs;
@@ -822,7 +822,7 @@ u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
flags |= PIPE_CONTROL_FLUSH_L3;
/* Wa_14016712196 */
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
/* dummy PIPE_CONTROL + depth flush */
cs = gen12_emit_pipe_control(cs, 0,
PIPE_CONTROL_DEPTH_CACHE_FLUSH, 0);
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 9895e18df0..81bf221637 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -5,6 +5,7 @@
#include <linux/log2.h>
+#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gen8_ppgtt.h"
@@ -222,6 +223,9 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+ if (vm->rsvd.obj)
+ i915_gem_object_put(vm->rsvd.obj);
+
if (intel_vgpu_active(vm->i915))
gen8_ppgtt_notify_vgt(ppgtt, false);
@@ -950,6 +954,44 @@ err_pd:
return ERR_PTR(err);
}
+static int gen8_init_rsvd(struct i915_address_space *vm)
+{
+ struct drm_i915_private *i915 = vm->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int ret;
+
+ if (!intel_gt_needs_wa_16018031267(vm->gt))
+ return 0;
+
+ /* The memory will be used only by the GPU. */
+ obj = i915_gem_object_create_lmem(i915, PAGE_SIZE,
+ I915_BO_ALLOC_VOLATILE |
+ I915_BO_ALLOC_GPU_ONLY);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto unref;
+ }
+
+ ret = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (ret)
+ goto unref;
+
+ vm->rsvd.vma = i915_vma_make_unshrinkable(vma);
+ vm->rsvd.obj = obj;
+ vm->total -= vma->node.size;
+ return 0;
+unref:
+ i915_gem_object_put(obj);
+ return ret;
+}
+
/*
* GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
* with a net effect resembling a 2-level page table in normal x86 terms. Each
@@ -1031,6 +1073,10 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
+ err = gen8_init_rsvd(&ppgtt->vm);
+ if (err)
+ goto err_put;
+
return ppgtt;
err_put:
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index ecc990ec1b..d650beb8ed 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -28,11 +28,14 @@ static void irq_disable(struct intel_breadcrumbs *b)
static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
{
+ intel_wakeref_t wakeref;
+
/*
* Since we are waiting on a request, the GPU should be busy
* and should have its own rpm reference.
*/
- if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt)))
+ wakeref = intel_gt_pm_get_if_awake(b->irq_engine->gt);
+ if (GEM_WARN_ON(!wakeref))
return;
/*
@@ -41,7 +44,7 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
* which we can add a new waiter and avoid the cost of re-enabling
* the irq.
*/
- WRITE_ONCE(b->irq_armed, true);
+ WRITE_ONCE(b->irq_armed, wakeref);
/* Requests may have completed before we could enable the interrupt. */
if (!b->irq_enabled++ && b->irq_enable(b))
@@ -61,12 +64,14 @@ static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b)
static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
{
+ intel_wakeref_t wakeref = b->irq_armed;
+
GEM_BUG_ON(!b->irq_enabled);
if (!--b->irq_enabled)
b->irq_disable(b);
- WRITE_ONCE(b->irq_armed, false);
- intel_gt_pm_put_async(b->irq_engine->gt);
+ WRITE_ONCE(b->irq_armed, 0);
+ intel_gt_pm_put_async(b->irq_engine->gt, wakeref);
}
static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b)
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
index 72dfd3748c..bdf09fd67b 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h
@@ -13,6 +13,7 @@
#include <linux/types.h>
#include "intel_engine_types.h"
+#include "intel_wakeref.h"
/*
* Rather than have every client wait upon all user interrupts,
@@ -43,7 +44,7 @@ struct intel_breadcrumbs {
spinlock_t irq_lock; /* protects the interrupt from hardirq context */
struct irq_work irq_work; /* for use from inside irq_lock */
unsigned int irq_enabled;
- bool irq_armed;
+ intel_wakeref_t irq_armed;
/* Not all breadcrumbs are attached to physical HW */
intel_engine_mask_t engine_mask;
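With this change irq_armed stores the wakeref handle itself, so a nonzero value does double duty: it still answers "is the interrupt armed?" and it is also what gets handed back to intel_gt_pm_put_async(). A tiny stand-alone model of a handle that works as both a boolean and a release token; the counter scheme is invented purely for illustration:

#include <stdio.h>

typedef unsigned long wakeref_t;          /* 0 means "not held" */

static unsigned long next_handle = 1;
static int pm_count;

static wakeref_t pm_get(void)   { pm_count++; return next_handle++; }
static void pm_put(wakeref_t w) { if (w) pm_count--; }

int main(void)
{
	wakeref_t irq_armed = 0;

	irq_armed = pm_get();                 /* arm: take a reference */
	printf("armed=%d count=%d\n", irq_armed != 0, pm_count);

	pm_put(irq_armed);                    /* disarm: release with the handle */
	irq_armed = 0;
	printf("armed=%d count=%d\n", irq_armed != 0, pm_count);
	return 0;
}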
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index a53b26178f..a2f1245741 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -6,6 +6,7 @@
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"
+#include "i915_drm_client.h"
#include "i915_drv.h"
#include "i915_trace.h"
@@ -50,6 +51,7 @@ intel_context_create(struct intel_engine_cs *engine)
int intel_context_alloc_state(struct intel_context *ce)
{
+ struct i915_gem_context *ctx;
int err = 0;
if (mutex_lock_interruptible(&ce->pin_mutex))
@@ -66,6 +68,18 @@ int intel_context_alloc_state(struct intel_context *ce)
goto unlock;
set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
+
+ rcu_read_lock();
+ ctx = rcu_dereference(ce->gem_context);
+ if (ctx && !kref_get_unless_zero(&ctx->ref))
+ ctx = NULL;
+ rcu_read_unlock();
+ if (ctx) {
+ if (ctx->client)
+ i915_drm_client_add_context_objects(ctx->client,
+ ce);
+ i915_gem_context_put(ctx);
+ }
}
unlock:
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index a80e3b7c24..25564c0150 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -212,7 +212,7 @@ static inline void intel_context_enter(struct intel_context *ce)
return;
ce->ops->enter(ce);
- intel_gt_pm_get(ce->vm->gt);
+ ce->wakeref = intel_gt_pm_get(ce->vm->gt);
}
static inline void intel_context_mark_active(struct intel_context *ce)
@@ -229,7 +229,7 @@ static inline void intel_context_exit(struct intel_context *ce)
if (--ce->active_count)
return;
- intel_gt_pm_put_async(ce->vm->gt);
+ intel_gt_pm_put_async(ce->vm->gt, ce->wakeref);
ce->ops->exit(ce);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index aceaac28a3..7eccbd70d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -17,6 +17,7 @@
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"
+#include "intel_wakeref.h"
#include "uc/intel_guc_fwif.h"
@@ -112,6 +113,7 @@ struct intel_context {
u32 ring_size;
struct intel_ring *ring;
struct intel_timeline *timeline;
+ intel_wakeref_t wakeref;
unsigned long flags;
#define CONTEXT_BARRIER_BIT 0
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4a11219e56..84be97f959 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -47,7 +47,7 @@
#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
#define GEN11_LR_CONTEXT_RENDER_SIZE (14 * PAGE_SIZE)
-#define GEN8_LR_CONTEXT_OTHER_SIZE ( 2 * PAGE_SIZE)
+#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
#define MAX_MMIO_BASES 3
struct engine_info {
@@ -908,6 +908,23 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
info->engine_mask &= ~BIT(GSC0);
}
+ /*
+ * Do not create the command streamer for CCS slices beyond the first.
+ * All the workloads submitted to the first engine will be shared among
+ * all the slices.
+ *
+ * Once the user is allowed to customize the CCS mode, this check
+ * needs to be removed.
+ */
+ if (IS_DG2(gt->i915)) {
+ u8 first_ccs = __ffs(CCS_MASK(gt));
+
+ /* Mask off all the CCS engines */
+ info->engine_mask &= ~GENMASK(CCS3, CCS0);
+ /* Add back only the first CCS engine */
+ info->engine_mask |= BIT(_CCS(first_ccs));
+ }
+
return info->engine_mask;
}
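For the DG2 branch above, the mask arithmetic is: find the first CCS engine that is present, clear all CCS bits, then set only that one. A stand-alone sketch with an invented bit layout (four consecutive CCS bits) instead of the real engine enum:

#include <stdio.h>

#define CCS0_BIT 8                      /* hypothetical position of CCS0 */
#define CCS_ALL  (0xfu << CCS0_BIT)     /* CCS0..CCS3 */

int main(void)
{
	unsigned int engine_mask = 0xffu | (0xau << CCS0_BIT); /* CCS1, CCS3 present */
	unsigned int ccs_mask = (engine_mask & CCS_ALL) >> CCS0_BIT;
	unsigned int first_ccs = __builtin_ctz(ccs_mask);       /* __ffs() analogue */

	engine_mask &= ~CCS_ALL;                     /* mask off all CCS engines */
	engine_mask |= 1u << (CCS0_BIT + first_ccs); /* keep only the first one */

	printf("first_ccs=%u engine_mask=%#x\n", first_ccs, engine_mask);
	return 0;
}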
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 9a527e1f5b..1a8e2b7db0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -188,7 +188,7 @@ static void heartbeat(struct work_struct *wrk)
* low latency and no jitter] the chance to naturally
* complete before being preempted.
*/
- attr.priority = 0;
+ attr.priority = I915_PRIORITY_NORMAL;
if (rq->sched.attr.priority >= attr.priority)
attr.priority = I915_PRIORITY_HEARTBEAT;
if (rq->sched.attr.priority >= attr.priority)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 5a3a5b29d1..fb7bff27b4 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -63,7 +63,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
ENGINE_TRACE(engine, "\n");
- intel_gt_pm_get(engine->gt);
+ engine->wakeref_track = intel_gt_pm_get(engine->gt);
/* Discard stale context state from across idling */
ce = engine->kernel_context;
@@ -122,6 +122,7 @@ __queue_and_release_pm(struct i915_request *rq,
*/
GEM_BUG_ON(rq->context->active_count != 1);
__intel_gt_pm_get(engine->gt);
+ rq->context->wakeref = intel_wakeref_track(&engine->gt->wakeref);
/*
* We have to serialise all potential retirement paths with our
@@ -282,7 +283,7 @@ static int __engine_park(struct intel_wakeref *wf)
engine->park(engine);
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
- intel_gt_pm_put_async(engine->gt);
+ intel_gt_pm_put_async(engine->gt, engine->wakeref_track);
return 0;
}
@@ -293,7 +294,7 @@ static const struct intel_wakeref_ops wf_ops = {
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops);
+ intel_wakeref_init(&engine->wakeref, engine->i915, &wf_ops, engine->name);
intel_engine_init_heartbeat(engine);
intel_gsc_idle_msg_enable(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index d68675925b..1d97c435a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -10,6 +10,7 @@
#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
+#include "intel_gt.h"
#include "intel_gt_pm.h"
static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
index fdd4ddd3a9..a8eac59e37 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h
@@ -118,9 +118,15 @@
#define CCID_EXTENDED_STATE_RESTORE BIT(2)
#define CCID_EXTENDED_STATE_SAVE BIT(3)
#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */
+#define PER_CTX_BB_FORCE BIT(2)
+#define PER_CTX_BB_VALID BIT(0)
+
#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */
#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */
#define ECOSKPD(base) _MMIO((base) + 0x1d0)
+#define XEHP_BLITTER_SCHEDULING_MODE_MASK REG_GENMASK(12, 11)
+#define XEHP_BLITTER_ROUND_ROBIN_MODE \
+ REG_FIELD_PREP(XEHP_BLITTER_SCHEDULING_MODE_MASK, 1)
#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4)
#define ECO_GATING_CX_ONLY REG_BIT(3)
#define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3)
@@ -257,5 +263,7 @@
#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18)
#define ALNUNIT_CLKGATE_DIS REG_BIT(13)
+#define VDBOX_CGCTL3F1C(base) _MMIO((base) + 0x3f1c)
+#define MFXPIPE_CLKGATE_DIS REG_BIT(3)
#endif /* __INTEL_ENGINE_REGS__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 8769760257..960e6be204 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -446,7 +446,9 @@ struct intel_engine_cs {
unsigned long serial;
unsigned long wakeref_serial;
+ intel_wakeref_t wakeref_track;
struct intel_wakeref wakeref;
+
struct file *default_state;
struct {
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 42e09f1589..b061a0a0d6 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -630,7 +630,7 @@ static void __execlists_schedule_out(struct i915_request * const rq,
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
if (engine->fw_domain && !--engine->fw_active)
intel_uncore_forcewake_put(engine->uncore, engine->fw_domain);
- intel_gt_pm_put_async(engine->gt);
+ intel_gt_pm_put_async_untracked(engine->gt);
/*
* If this is part of a virtual engine, its next request may
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 15fc8e4703..21a7e3191c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -245,16 +245,15 @@ static void guc_ggtt_invalidate(struct i915_ggtt *ggtt)
gen8_ggtt_invalidate(ggtt);
list_for_each_entry(gt, &ggtt->gt_list, ggtt_link) {
- if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc)) {
+ if (intel_guc_tlb_invalidation_is_available(&gt->uc.guc))
guc_ggtt_ct_invalidate(gt);
- } else if (GRAPHICS_VER(i915) >= 12) {
+ else if (GRAPHICS_VER(i915) >= 12)
intel_uncore_write_fw(gt->uncore,
GEN12_GUC_TLB_INV_CR,
GEN12_GUC_TLB_INV_CR_INVALIDATE);
- } else {
+ else
intel_uncore_write_fw(gt->uncore,
GEN8_GTCR, GEN8_GTCR_INVALIDATE);
- }
}
}
@@ -297,7 +296,7 @@ static bool should_update_ggtt_with_bind(struct i915_ggtt *ggtt)
return intel_gt_is_bind_context_ready(gt);
}
-static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
+static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt, intel_wakeref_t *wakeref)
{
struct intel_context *ce;
struct intel_gt *gt = ggtt->vm.gt;
@@ -314,7 +313,8 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
* would conflict with fs_reclaim trying to allocate memory while
* doing rpm_resume().
*/
- if (!intel_gt_pm_get_if_awake(gt))
+ *wakeref = intel_gt_pm_get_if_awake(gt);
+ if (!*wakeref)
return NULL;
intel_engine_pm_get(ce->engine);
@@ -322,10 +322,10 @@ static struct intel_context *gen8_ggtt_bind_get_ce(struct i915_ggtt *ggtt)
return ce;
}
-static void gen8_ggtt_bind_put_ce(struct intel_context *ce)
+static void gen8_ggtt_bind_put_ce(struct intel_context *ce, intel_wakeref_t wakeref)
{
intel_engine_pm_put(ce->engine);
- intel_gt_pm_put(ce->engine->gt);
+ intel_gt_pm_put(ce->engine->gt, wakeref);
}
static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
@@ -338,12 +338,13 @@ static bool gen8_ggtt_bind_ptes(struct i915_ggtt *ggtt, u32 offset,
struct sgt_iter iter;
struct i915_request *rq;
struct intel_context *ce;
+ intel_wakeref_t wakeref;
u32 *cs;
if (!num_entries)
return true;
- ce = gen8_ggtt_bind_get_ce(ggtt);
+ ce = gen8_ggtt_bind_get_ce(ggtt, &wakeref);
if (!ce)
return false;
@@ -419,13 +420,13 @@ queue_err_rq:
offset += n_ptes;
}
- gen8_ggtt_bind_put_ce(ce);
+ gen8_ggtt_bind_put_ce(ce, wakeref);
return true;
err_rq:
i915_request_put(rq);
put_ce:
- gen8_ggtt_bind_put_ce(ce);
+ gen8_ggtt_bind_put_ce(ce, wakeref);
return false;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.h b/drivers/gpu/drm/i915/gt/intel_gsc.h
index 7ab3ca0f9f..013c642514 100644
--- a/drivers/gpu/drm/i915/gt/intel_gsc.h
+++ b/drivers/gpu/drm/i915/gt/intel_gsc.h
@@ -21,8 +21,11 @@ struct mei_aux_device;
/**
* struct intel_gsc - graphics security controller
*
- * @gem_obj: scratch memory GSC operations
- * @intf : gsc interface
+ * @intf: gsc interface
+ * @intf.adev: MEI aux. device for this @intf
+ * @intf.gem_obj: scratch memory GSC operations
+ * @intf.irq: IRQ for this device (%-1 for no IRQ)
+ * @intf.id: this interface's id number/index
*/
struct intel_gsc {
struct intel_gsc_intf {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index ba1186fc52..6a2c2718bc 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -451,7 +451,7 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt)
spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_posting_read_fw(uncore,
- RING_HEAD(RENDER_RING_BASE));
+ RING_TAIL(RENDER_RING_BASE));
spin_unlock_irqrestore(&uncore->lock, flags);
}
}
@@ -1024,6 +1024,12 @@ enum i915_map_type intel_gt_coherent_map_type(struct intel_gt *gt,
return I915_MAP_WC;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ return IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 55), IP_VER(12, 71));
+}
+
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt)
{
return MEDIA_VER_FULL(gt->i915) == IP_VER(13, 0) && gt->type == GT_MEDIA;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 970bedf6b7..003eb93b82 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -87,8 +87,13 @@ static inline bool gt_is_root(struct intel_gt *gt)
return !gt->info.id;
}
+bool intel_gt_needs_wa_16018031267(struct intel_gt *gt);
bool intel_gt_needs_wa_22016122933(struct intel_gt *gt);
+#define NEEDS_FASTCOLOR_BLT_WABB(engine) ( \
+ intel_gt_needs_wa_16018031267(engine->gt) && \
+ engine->class == COPY_ENGINE_CLASS && engine->instance == 0)
+
static inline struct intel_gt *uc_to_gt(struct intel_uc *uc)
{
return container_of(uc, struct intel_gt, uc);
@@ -114,6 +119,11 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
return container_of(gsc, struct intel_gt, gsc);
}
+static inline struct drm_i915_private *guc_to_i915(struct intel_guc *guc)
+{
+ return guc_to_gt(guc)->i915;
+}
+
void intel_gt_common_init_early(struct intel_gt *gt);
int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
@@ -167,6 +177,20 @@ void intel_gt_release_all(struct drm_i915_private *i915);
(id__)++) \
for_each_if(((gt__) = (i915__)->gt[(id__)]))
+/* Simple iterator over all initialised engines */
+#define for_each_engine(engine__, gt__, id__) \
+ for ((id__) = 0; \
+ (id__) < I915_NUM_ENGINES; \
+ (id__)++) \
+ for_each_if ((engine__) = (gt__)->engine[(id__)])
+
+/* Iterator over subset of engines selected by mask */
+#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
+ for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
+ (tmp__) ? \
+ ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
+ 0;)
+
void intel_gt_info_print(const struct intel_gt_info *info,
struct drm_printer *p);
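The for_each_engine_masked() helper moved here walks a bitmask by repeatedly extracting the lowest set bit. A self-contained analogue of that iteration pattern, printing plain indices instead of looking up engine pointers:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x2d;       /* hypothetical engine mask: bits 0, 2, 3, 5 */

	/* Visit set bits lowest-first, consuming them as we go; this is the
	 * pattern __mask_next_bit() enables in the kernel macro. */
	while (mask) {
		unsigned int bit = __builtin_ctz(mask);

		mask &= mask - 1;       /* clear the bit we just visited */
		printf("engine %u\n", bit);
	}
	return 0;
}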
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
new file mode 100644
index 0000000000..044219c596
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.c
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
+#include "intel_gt_regs.h"
+
+void intel_gt_apply_ccs_mode(struct intel_gt *gt)
+{
+ int cslice;
+ u32 mode = 0;
+ int first_ccs = __ffs(CCS_MASK(gt));
+
+ if (!IS_DG2(gt->i915))
+ return;
+
+ /* Build the value for the fixed CCS load balancing */
+ for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
+ if (CCS_MASK(gt) & BIT(cslice))
+ /*
+ * If the cslice is present, route it
+ * to the first available CCS engine...
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice, first_ccs);
+
+ else
+ /*
+ * ... otherwise mark the cslice as
+ * unavailable since no CCS dispatches to it
+ */
+ mode |= XEHP_CCS_MODE_CSLICE(cslice,
+ XEHP_CCS_MODE_CSLICE_MASK);
+ }
+
+ intel_uncore_write(gt->uncore, XEHP_CCS_MODE, mode);
+}
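The register value built above places a 3-bit field per compute slice: slices that exist are pointed at the first CCS engine, the rest get the all-ones "unavailable" encoding. A minimal user-space model of that loop, using the same 3-bit field width but otherwise invented values:

#include <stdio.h>

#define CSLICE_MASK   0x7u                 /* 3 bits per cslice */
#define CSLICE_WIDTH  3
#define MAX_CCS       4
#define CSLICE(cslice, ccs) ((unsigned int)(ccs) << ((cslice) * CSLICE_WIDTH))

int main(void)
{
	unsigned int ccs_mask = 0x5;  /* hypothetical: cslices 0 and 2 present */
	unsigned int first_ccs = __builtin_ctz(ccs_mask);
	unsigned int mode = 0;
	int cslice;

	for (cslice = 0; cslice < MAX_CCS; cslice++) {
		if (ccs_mask & (1u << cslice))
			mode |= CSLICE(cslice, first_ccs);   /* route to first engine */
		else
			mode |= CSLICE(cslice, CSLICE_MASK); /* mark unavailable */
	}

	printf("CCS_MODE = 0x%08x\n", mode);
	return 0;
}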
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
new file mode 100644
index 0000000000..9e5549caeb
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_ccs_mode.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef __INTEL_GT_CCS_MODE_H__
+#define __INTEL_GT_CCS_MODE_H__
+
+struct intel_gt;
+
+void intel_gt_apply_ccs_mode(struct intel_gt *gt);
+
+#endif /* __INTEL_GT_CCS_MODE_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
index 8f9b874fdc..3aa1d014c1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
@@ -6,8 +6,8 @@
#include <drm/drm_print.h>
-#include "i915_drv.h" /* for_each_engine! */
#include "intel_engine.h"
+#include "intel_gt.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_engines_debugfs.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
index 34913912d8..e253750a51 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_mcr.c
@@ -388,8 +388,7 @@ void intel_gt_mcr_lock(struct intel_gt *gt, unsigned long *flags)
* registers. This wakeref will be released in the unlock
* routine.
*
- * This is expected to become a formally documented/numbered
- * workaround soon.
+ * Wa_22018931422
*/
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_GT);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index f5899d503e..220ac4f92e 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -28,19 +28,20 @@
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
int count = atomic_read(&gt->user_wakeref);
+ intel_wakeref_t wakeref;
/* Inside suspend/resume so single threaded, no races to worry about. */
if (likely(!count))
return;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
if (suspend) {
GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
atomic_sub(count, &gt->wakeref.count);
} else {
atomic_add(count, &gt->wakeref.count);
}
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
}
static void runtime_begin(struct intel_gt *gt)
@@ -138,7 +139,7 @@ void intel_gt_pm_init_early(struct intel_gt *gt)
* runtime_pm is per-device rather than per-tile, so this is still the
* correct structure.
*/
- intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops);
+ intel_wakeref_init(&gt->wakeref, gt->i915, &wf_ops, "GT");
seqcount_mutex_init(&gt->stats.lock, &gt->wakeref.mutex);
}
@@ -167,7 +168,7 @@ static void gt_sanitize(struct intel_gt *gt, bool force)
enum intel_engine_id id;
intel_wakeref_t wakeref;
- GT_TRACE(gt, "force:%s", str_yes_no(force));
+ GT_TRACE(gt, "force:%s\n", str_yes_no(force));
/* Use a raw wakeref to avoid calling intel_display_power_get early */
wakeref = intel_runtime_pm_get(gt->uncore->rpm);
@@ -236,6 +237,7 @@ int intel_gt_resume(struct intel_gt *gt)
{
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err;
err = intel_gt_has_unrecoverable_error(gt);
@@ -252,7 +254,7 @@ int intel_gt_resume(struct intel_gt *gt)
*/
gt_sanitize(gt, true);
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
intel_rc6_sanitize(&gt->rc6);
@@ -295,7 +297,7 @@ int intel_gt_resume(struct intel_gt *gt)
out_fw:
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
intel_gt_bind_context_set_ready(gt);
return err;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index b1eeb5b339..911fd01602 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -16,19 +16,28 @@ static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
return intel_wakeref_is_active(&gt->wakeref);
}
-static inline void intel_gt_pm_get(struct intel_gt *gt)
+static inline void intel_gt_pm_get_untracked(struct intel_gt *gt)
{
intel_wakeref_get(&gt->wakeref);
}
+static inline intel_wakeref_t intel_gt_pm_get(struct intel_gt *gt)
+{
+ intel_gt_pm_get_untracked(gt);
+ return intel_wakeref_track(&gt->wakeref);
+}
+
static inline void __intel_gt_pm_get(struct intel_gt *gt)
{
__intel_wakeref_get(&gt->wakeref);
}
-static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
+static inline intel_wakeref_t intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
- return intel_wakeref_get_if_active(&gt->wakeref);
+ if (!intel_wakeref_get_if_active(&gt->wakeref))
+ return 0;
+
+ return intel_wakeref_track(&gt->wakeref);
}
static inline void intel_gt_pm_might_get(struct intel_gt *gt)
@@ -36,12 +45,18 @@ static inline void intel_gt_pm_might_get(struct intel_gt *gt)
intel_wakeref_might_get(&gt->wakeref);
}
-static inline void intel_gt_pm_put(struct intel_gt *gt)
+static inline void intel_gt_pm_put_untracked(struct intel_gt *gt)
{
intel_wakeref_put(&gt->wakeref);
}
-static inline void intel_gt_pm_put_async(struct intel_gt *gt)
+static inline void intel_gt_pm_put(struct intel_gt *gt, intel_wakeref_t handle)
+{
+ intel_wakeref_untrack(&gt->wakeref, handle);
+ intel_gt_pm_put_untracked(gt);
+}
+
+static inline void intel_gt_pm_put_async_untracked(struct intel_gt *gt)
{
intel_wakeref_put_async(&gt->wakeref);
}
@@ -51,9 +66,14 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
intel_wakeref_might_put(&gt->wakeref);
}
-#define with_intel_gt_pm(gt, tmp) \
- for (tmp = 1, intel_gt_pm_get(gt); tmp; \
- intel_gt_pm_put(gt), tmp = 0)
+static inline void intel_gt_pm_put_async(struct intel_gt *gt, intel_wakeref_t handle)
+{
+ intel_wakeref_untrack(&gt->wakeref, handle);
+ intel_gt_pm_put_async_untracked(gt);
+}
+
+#define with_intel_gt_pm(gt, wf) \
+ for (wf = intel_gt_pm_get(gt); wf; intel_gt_pm_put(gt, wf), wf = 0)
/**
* with_intel_gt_pm_if_awake - if GT is PM awake, get a reference to prevent
@@ -64,7 +84,7 @@ static inline void intel_gt_pm_might_put(struct intel_gt *gt)
* @wf: pointer to a temporary wakeref.
*/
#define with_intel_gt_pm_if_awake(gt, wf) \
- for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt), wf = 0)
+ for (wf = intel_gt_pm_get_if_awake(gt); wf; intel_gt_pm_put_async(gt, wf), wf = 0)
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
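Conceptually, the reworked intel_gt_pm_get()/intel_gt_pm_put() pair returns a handle that the caller must hand back on release, so leaked references can be attributed, while the *_untracked() variants keep the old void interface. A rough stand-alone model of the tracked flavour; the "tracker" is reduced to remembering a few outstanding handles purely as an illustration:

#include <stdio.h>

typedef unsigned long wakeref_t;

static int count;
static unsigned long next = 1;
static wakeref_t live[8];                 /* toy tracker for outstanding handles */

static wakeref_t gt_pm_get(void)
{
	wakeref_t w = next++;

	count++;
	live[w % 8] = w;                  /* remember who holds a reference */
	return w;
}

static void gt_pm_put(wakeref_t w)
{
	live[w % 8] = 0;                  /* untrack, then drop the reference */
	count--;
}

int main(void)
{
	wakeref_t w = gt_pm_get();

	printf("held: count=%d handle=%lu tracked=%s\n",
	       count, w, live[w % 8] ? "yes" : "no");
	gt_pm_put(w);
	printf("idle: count=%d\n", count);
	return 0;
}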
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index f900cc68d6..7114c116e9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -27,7 +27,7 @@
void intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
{
atomic_inc(&gt->user_wakeref);
- intel_gt_pm_get(gt);
+ intel_gt_pm_get_untracked(gt);
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_get(gt->uncore);
}
@@ -36,7 +36,7 @@ void intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
{
if (GRAPHICS_VER(gt->i915) >= 6)
intel_uncore_forcewake_user_put(gt->uncore);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put_untracked(gt);
atomic_dec(&gt->user_wakeref);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index eecd0a87a6..743fe35667 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -469,6 +469,9 @@
#define XEHP_PSS_MODE2 MCR_REG(0x703c)
#define SCOREBOARD_STALL_FLUSH_CONTROL REG_BIT(5)
+#define XEHP_PSS_CHICKEN MCR_REG(0x7044)
+#define FD_END_COLLECT REG_BIT(5)
+
#define GEN7_SC_INSTDONE _MMIO(0x7100)
#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
@@ -537,6 +540,9 @@
#define XEHP_SQCM MCR_REG(0x8724)
#define EN_32B_ACCESS REG_BIT(30)
+#define MTL_GSCPSMI_BASEADDR_LSB _MMIO(0x880c)
+#define MTL_GSCPSMI_BASEADDR_MSB _MMIO(0x8810)
+
#define HSW_IDICR _MMIO(0x9008)
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
@@ -1471,8 +1477,14 @@
#define ECOBITS_PPGTT_CACHE4B (0 << 8)
#define GEN12_RCU_MODE _MMIO(0x14800)
+#define XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE REG_BIT(1)
#define GEN12_RCU_MODE_CCS_ENABLE REG_BIT(0)
+#define XEHP_CCS_MODE _MMIO(0x14804)
+#define XEHP_CCS_MODE_CSLICE_MASK REG_GENMASK(2, 0) /* CCS0-3 + rsvd */
+#define XEHP_CCS_MODE_CSLICE_WIDTH ilog2(XEHP_CCS_MODE_CSLICE_MASK + 1)
+#define XEHP_CCS_MODE_CSLICE(cslice, ccs) (ccs << (cslice * XEHP_CCS_MODE_CSLICE_WIDTH))
+
#define CHV_FUSE_GT _MMIO(VLV_GUNIT_BASE + 0x2168)
#define CHV_FGT_DISABLE_SS0 (1 << 10)
#define CHV_FGT_DISABLE_SS1 (1 << 11)
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 4fbed27ef0..86f73fe558 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -63,6 +63,9 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
+
+ if (vm->fpriv)
+ i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
@@ -84,6 +87,9 @@ struct drm_i915_gem_object *alloc_pt_dma(struct i915_address_space *vm, int sz)
if (!IS_ERR(obj)) {
obj->base.resv = i915_vm_resv_get(vm);
obj->shares_resv_from = vm;
+
+ if (vm->fpriv)
+ i915_drm_client_add_object(vm->fpriv->client, obj);
}
return obj;
@@ -95,6 +101,16 @@ int map_pt_dma(struct i915_address_space *vm, struct drm_i915_gem_object *obj)
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
+ /*
+ * FIXME: It is suspected that some Address Translation Service (ATS)
+ * issue on IOMMU is causing CAT errors to occur on some MTL workloads.
+ * Applying a write barrier to the ppgtt set entry functions appeared
+ * to have no effect, so we must temporarily use I915_MAP_WC here on
+ * MTL until a proper ATS solution is found.
+ */
+ if (IS_METEORLAKE(vm->i915))
+ type = I915_MAP_WC;
+
vaddr = i915_gem_object_pin_map_unlocked(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
@@ -109,6 +125,16 @@ int map_pt_dma_locked(struct i915_address_space *vm, struct drm_i915_gem_object
void *vaddr;
type = intel_gt_coherent_map_type(vm->gt, obj, true);
+ /*
+ * FIXME: It is suspected that some Address Translation Service (ATS)
+ * issue on IOMMU is causing CAT errors to occur on some MTL workloads.
+ * Applying a write barrier to the ppgtt set entry functions appeared
+ * to have no effect, so we must temporarily use I915_MAP_WC here on
+ * MTL until a proper ATS solution is found.
+ */
+ if (IS_METEORLAKE(vm->i915))
+ type = I915_MAP_WC;
+
vaddr = i915_gem_object_pin_map(obj, type);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index b471edac26..6b85222ee3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -249,8 +249,13 @@ struct i915_address_space {
struct work_struct release_work;
struct drm_mm mm;
+ struct {
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ } rsvd;
struct intel_gt *gt;
struct drm_i915_private *i915;
+ struct drm_i915_file_private *fpriv;
struct device *dma;
u64 total; /* size addr space maps (ex. 2GB for ggtt) */
u64 reserved; /* size addr space reserved */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index eaf66d9031..7c367ba8d9 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -829,6 +829,18 @@ lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine)
}
static void
+lrc_setup_bb_per_ctx(u32 *regs,
+ const struct intel_engine_cs *engine,
+ u32 ctx_bb_ggtt_addr)
+{
+ GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1);
+ regs[lrc_ring_wa_bb_per_ctx(engine) + 1] =
+ ctx_bb_ggtt_addr |
+ PER_CTX_BB_FORCE |
+ PER_CTX_BB_VALID;
+}
+
+static void
lrc_setup_indirect_ctx(u32 *regs,
const struct intel_engine_cs *engine,
u32 ctx_bb_ggtt_addr,
@@ -1020,7 +1032,13 @@ static u32 context_wa_bb_offset(const struct intel_context *ce)
return PAGE_SIZE * ce->wa_bb_page;
}
-static u32 *context_indirect_bb(const struct intel_context *ce)
+/*
+ * per_ctx below determines which WABB section is used.
+ * When true, the function returns the location of the
+ * PER_CTX_BB. When false, the function returns the
+ * location of the INDIRECT_CTX.
+ */
+static u32 *context_wabb(const struct intel_context *ce, bool per_ctx)
{
void *ptr;
@@ -1029,6 +1047,7 @@ static u32 *context_indirect_bb(const struct intel_context *ce)
ptr = ce->lrc_reg_state;
ptr -= LRC_STATE_OFFSET; /* back to start of context image */
ptr += context_wa_bb_offset(ce);
+ ptr += per_ctx ? PAGE_SIZE : 0;
return ptr;
}
@@ -1105,7 +1124,8 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) >= 12) {
ce->wa_bb_page = context_size / PAGE_SIZE;
- context_size += PAGE_SIZE;
+ /* INDIRECT_CTX and PER_CTX_BB need separate pages. */
+ context_size += PAGE_SIZE * 2;
}
if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
@@ -1407,12 +1427,85 @@ gen12_emit_indirect_ctx_xcs(const struct intel_context *ce, u32 *cs)
return gen12_emit_aux_table_inv(ce->engine, cs);
}
+static u32 *xehp_emit_fastcolor_blt_wabb(const struct intel_context *ce, u32 *cs)
+{
+ struct intel_gt *gt = ce->engine->gt;
+ int mocs = gt->mocs.uc_index << 1;
+
+ /**
+ * Wa_16018031267 / Wa_16018063123 requires that SW forces the
+ * main copy engine arbitration into round robin mode. We
+ * additionally need to submit the following WABB blt command
+ * to produce 4 subblits with each subblit generating 0 byte
+ * write requests as WABB:
+ *
+ * XY_FASTCOLOR_BLT
+ * BG0 -> 5100000E
+ * BG1 -> 0000003F (Dest pitch)
+ * BG2 -> 00000000 (X1, Y1) = (0, 0)
+ * BG3 -> 00040001 (X2, Y2) = (1, 4)
+ * BG4 -> scratch
+ * BG5 -> scratch
+ * BG6-12 -> 00000000
+ * BG13 -> 20004004 (Surf. Width = 2, Surf. Height = 5)
+ * BG14 -> 00000010 (Qpitch = 4)
+ * BG15 -> 00000000
+ */
+ *cs++ = XY_FAST_COLOR_BLT_CMD | (16 - 2);
+ *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) | 0x3f;
+ *cs++ = 0;
+ *cs++ = 4 << 16 | 1;
+ *cs++ = lower_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
+ *cs++ = upper_32_bits(i915_vma_offset(ce->vm->rsvd.vma));
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0x20004004;
+ *cs++ = 0x10;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *
+xehp_emit_per_ctx_bb(const struct intel_context *ce, u32 *cs)
+{
+ /* Wa_16018031267, Wa_16018063123 */
+ if (NEEDS_FASTCOLOR_BLT_WABB(ce->engine))
+ cs = xehp_emit_fastcolor_blt_wabb(ce, cs);
+
+ return cs;
+}
+
+static void
+setup_per_ctx_bb(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 *(*emit)(const struct intel_context *, u32 *))
+{
+ /* Place PER_CTX_BB on next page after INDIRECT_CTX */
+ u32 * const start = context_wabb(ce, true);
+ u32 *cs;
+
+ cs = emit(ce, start);
+
+ /* PER_CTX_BB must manually terminate */
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ GEM_BUG_ON(cs - start > I915_GTT_PAGE_SIZE / sizeof(*cs));
+ lrc_setup_bb_per_ctx(ce->lrc_reg_state, engine,
+ lrc_indirect_bb(ce) + PAGE_SIZE);
+}
+
static void
setup_indirect_ctx_bb(const struct intel_context *ce,
const struct intel_engine_cs *engine,
u32 *(*emit)(const struct intel_context *, u32 *))
{
- u32 * const start = context_indirect_bb(ce);
+ u32 * const start = context_wabb(ce, false);
u32 *cs;
cs = emit(ce, start);
@@ -1511,6 +1604,7 @@ u32 lrc_update_regs(const struct intel_context *ce,
/* Mutually exclusive wrt to global indirect bb */
GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size);
setup_indirect_ctx_bb(ce, engine, fn);
+ setup_per_ctx_bb(ce, engine, xehp_emit_per_ctx_bb);
}
return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE;
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index f602895f6d..6a3246240e 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -849,13 +849,12 @@ void intel_sseu_print_topology(struct drm_i915_private *i915,
const struct sseu_dev_info *sseu,
struct drm_printer *p)
{
- if (sseu->max_slices == 0) {
+ if (sseu->max_slices == 0)
drm_printf(p, "Unavailable\n");
- } else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
sseu_print_xehp_topology(sseu, p);
- } else {
+ else
sseu_print_hsw_topology(sseu, p);
- }
}
void intel_sseu_print_ss_info(const char *type,
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 192ac0e59a..59816dd6fb 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -10,6 +10,7 @@
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_gt.h"
+#include "intel_gt_ccs_mode.h"
#include "intel_gt_mcr.h"
#include "intel_gt_print.h"
#include "intel_gt_regs.h"
@@ -51,7 +52,8 @@
* registers belonging to BCS, VCS or VECS should be implemented in
* xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
* engine's MMIO range but that are part of the common RCS/CCS reset domain
- * should be implemented in general_render_compute_wa_init().
+ * should be implemented in general_render_compute_wa_init(). Settings
+ * related to CCS load balancing should be added in ccs_engine_wa_mode().
*
* - GT workarounds: the list of these WAs is applied whenever these registers
* revert to their default values: on GPU reset, suspend/resume [1]_, etc.
@@ -777,6 +779,9 @@ static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
/* Wa_18019271663:dg2 */
wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
+
+ /* Wa_14019877138:dg2 */
+ wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
}
static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
@@ -786,8 +791,13 @@ static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
dg2_ctx_gt_tuning_init(engine, wal);
- if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
+ /*
+ * Due to Wa_16014892111, the DRAW_WATERMARK tuning must be done in
+ * gen12_emit_indirect_ctx_rcs() rather than here on some early
+ * steppings.
+ */
+ if (!(IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
+ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)))
wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
}
@@ -905,7 +915,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
if (engine->class != RENDER_CLASS)
goto done;
- if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_ctx_workarounds_init(engine, wal);
else if (IS_PONTEVECCHIO(i915))
; /* noop; none at this time */
@@ -1640,7 +1650,8 @@ pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
static void
xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- /* Wa_14018778641 / Wa_18018781329 */
+ /* Wa_14018575942 / Wa_18018781329 */
+ wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
/* Wa_22016670082 */
@@ -1663,8 +1674,22 @@ xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
+wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
+{
+ struct intel_engine_cs *engine;
+ int id;
+
+ for_each_engine(engine, gt, id)
+ if (engine->class == VIDEO_DECODE_CLASS)
+ wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
+ MFXPIPE_CLKGATE_DIS);
+}
+
+static void
xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ wa_16021867713(gt, wal);
+
/*
* Wa_14018778641
* Wa_18018781329
@@ -1674,6 +1699,9 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
+ /* Wa_22016670082 */
+ wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
+
debug_dump_steering(gt);
}
@@ -1690,7 +1718,7 @@ xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
*/
static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
{
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
}
@@ -1723,7 +1751,7 @@ gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
return;
}
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_gt_workarounds_init(gt, wal);
else if (IS_PONTEVECCHIO(i915))
pvc_gt_workarounds_init(gt, wal);
@@ -2196,7 +2224,7 @@ void intel_engine_init_whitelist(struct intel_engine_cs *engine)
if (engine->gt->type == GT_MEDIA)
; /* none yet */
- else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
xelpg_whitelist_build(engine);
else if (IS_PONTEVECCHIO(i915))
pvc_whitelist_build(engine);
@@ -2340,14 +2368,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
0, true);
}
- if (IS_DG2_G11(i915) || IS_DG2_G10(i915)) {
- /* Wa_22014600077:dg2 */
- wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_EU_COUNT_FOR_TDL_FLUSH),
- 0 /* Wa_14012342262 write-only reg, so skip verification */,
- true);
- }
-
if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
/*
@@ -2782,6 +2802,11 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
RING_SEMA_WAIT_POLL(engine->mmio_base),
1);
}
+ /* Wa_16018031267, Wa_16018063123 */
+ if (NEEDS_FASTCOLOR_BLT_WABB(engine))
+ wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
+ XEHP_BLITTER_SCHEDULING_MODE_MASK,
+ XEHP_BLITTER_ROUND_ROBIN_MODE);
}
static void
@@ -2811,7 +2836,7 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
{
struct drm_i915_private *i915 = gt->i915;
- if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
+ if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74)) || IS_DG2(i915))
wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
/*
@@ -2827,6 +2852,28 @@ add_render_compute_tuning_settings(struct intel_gt *gt,
wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
}
+static void ccs_engine_wa_mode(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ struct intel_gt *gt = engine->gt;
+
+ if (!IS_DG2(gt->i915))
+ return;
+
+ /*
+ * Wa_14019159160: This workaround, along with others, leads to
+ * significant challenges in utilizing load balancing among the
+ * CCS slices. Consequently, an architectural decision has been
+ * made to completely disable automatic CCS load balancing.
+ */
+ wa_masked_en(wal, GEN12_RCU_MODE, XEHP_RCU_MODE_FIXED_SLICE_CCS_MODE);
+
+ /*
+ * After having disabled automatic load balancing we need to
+ * assign all slices to a single CCS. We will call it CCS mode 1
+ */
+ intel_gt_apply_ccs_mode(gt);
+}
+
/*
* The workarounds in this function apply to shared registers in
* the general render reset domain that aren't tied to a
@@ -2864,7 +2911,8 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
}
if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
- IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
+ IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER) ||
+ IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 74), IP_VER(12, 74)))
/* Wa_14017856879 */
wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
@@ -2915,6 +2963,9 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
* Wa_22015475538:dg2
*/
wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
+
+ /* Wa_18028616096 */
+ wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
}
if (IS_DG2_G11(i915)) {
@@ -2943,11 +2994,6 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
true);
}
- if (IS_DG2_G10(i915) || IS_DG2_G12(i915)) {
- /* Wa_18028616096 */
- wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
- }
-
if (IS_XEHPSDV(i915)) {
/* Wa_1409954639 */
wa_mcr_masked_en(wal,
@@ -2978,8 +3024,10 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
* to a single RCS/CCS engine's workaround list since
* they're reset as part of the general render domain reset.
*/
- if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
+ if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) {
general_render_compute_wa_init(engine, wal);
+ ccs_engine_wa_mode(engine, wal);
+ }
if (engine->class == COMPUTE_CLASS)
ccs_engine_wa_init(engine, wal);
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
index 86cecf7a11..5ffa5e30f4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c
@@ -21,20 +21,22 @@ static int cmp_u32(const void *A, const void *B)
return *a - *b;
}
-static void perf_begin(struct intel_gt *gt)
+static intel_wakeref_t perf_begin(struct intel_gt *gt)
{
- intel_gt_pm_get(gt);
+ intel_wakeref_t wakeref = intel_gt_pm_get(gt);
/* Boost gpufreq to max [waitboost] and keep it fixed */
atomic_inc(&gt->rps.num_waiters);
queue_work(gt->i915->unordered_wq, &gt->rps.work);
flush_work(&gt->rps.work);
+
+ return wakeref;
}
-static int perf_end(struct intel_gt *gt)
+static int perf_end(struct intel_gt *gt, intel_wakeref_t wakeref)
{
atomic_dec(&gt->rps.num_waiters);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return igt_flush_test(gt->i915);
}
@@ -133,12 +135,13 @@ static int perf_mi_bb_start(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- perf_begin(gt);
+ wakeref = perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *batch;
@@ -207,7 +210,7 @@ out:
pr_info("%s: MI_BB_START cycles: %u\n",
engine->name, trifilter(cycles));
}
- if (perf_end(gt))
+ if (perf_end(gt, wakeref))
err = -EIO;
return err;
@@ -260,12 +263,13 @@ static int perf_mi_noop(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- perf_begin(gt);
+ wakeref = perf_begin(gt);
for_each_engine(engine, gt, id) {
struct intel_context *ce = engine->kernel_context;
struct i915_vma *base, *nop;
@@ -364,7 +368,7 @@ out:
pr_info("%s: 16K MI_NOOP cycles: %u\n",
engine->name, trifilter(cycles));
}
- if (perf_end(gt))
+ if (perf_end(gt, wakeref))
err = -EIO;
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 273d440a53..bc441ce7b3 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -84,7 +84,7 @@ static struct pulse *pulse_create(void)
static void pulse_unlock_wait(struct pulse *p)
{
- i915_active_unlock_wait(&p->active);
+ wait_var_event_timeout(&p->active, i915_active_is_idle(&p->active), HZ);
}
static int __live_idle_pulse(struct intel_engine_cs *engine,
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 0971241707..33351deeea 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -81,6 +81,7 @@ static int live_gt_clocks(void *arg)
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
if (!gt->clock_frequency) { /* unknown */
@@ -91,7 +92,7 @@ static int live_gt_clocks(void *arg)
if (GRAPHICS_VER(gt->i915) < 4) /* Any CS_TIMESTAMP? */
return 0;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
for_each_engine(engine, gt, id) {
@@ -128,7 +129,7 @@ static int live_gt_clocks(void *arg)
}
intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 5f826b6dcf..e17b8777d2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1555,7 +1555,7 @@ static int live_lrc_isolation(void *arg)
return err;
}
-static int indirect_ctx_submit_req(struct intel_context *ce)
+static int wabb_ctx_submit_req(struct intel_context *ce)
{
struct i915_request *rq;
int err = 0;
@@ -1579,7 +1579,8 @@ static int indirect_ctx_submit_req(struct intel_context *ce)
#define CTX_BB_CANARY_INDEX (CTX_BB_CANARY_OFFSET / sizeof(u32))
static u32 *
-emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+emit_wabb_ctx_canary(const struct intel_context *ce,
+ u32 *cs, bool per_ctx)
{
*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT |
@@ -1587,26 +1588,43 @@ emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
*cs++ = i915_mmio_reg_offset(RING_START(0));
*cs++ = i915_ggtt_offset(ce->state) +
context_wa_bb_offset(ce) +
- CTX_BB_CANARY_OFFSET;
+ CTX_BB_CANARY_OFFSET +
+ (per_ctx ? PAGE_SIZE : 0);
*cs++ = 0;
return cs;
}
+static u32 *
+emit_indirect_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ return emit_wabb_ctx_canary(ce, cs, false);
+}
+
+static u32 *
+emit_per_ctx_bb_canary(const struct intel_context *ce, u32 *cs)
+{
+ return emit_wabb_ctx_canary(ce, cs, true);
+}
+
static void
-indirect_ctx_bb_setup(struct intel_context *ce)
+wabb_ctx_setup(struct intel_context *ce, bool per_ctx)
{
- u32 *cs = context_indirect_bb(ce);
+ u32 *cs = context_wabb(ce, per_ctx);
cs[CTX_BB_CANARY_INDEX] = 0xdeadf00d;
- setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
+ if (per_ctx)
+ setup_per_ctx_bb(ce, ce->engine, emit_per_ctx_bb_canary);
+ else
+ setup_indirect_ctx_bb(ce, ce->engine, emit_indirect_ctx_bb_canary);
}
-static bool check_ring_start(struct intel_context *ce)
+static bool check_ring_start(struct intel_context *ce, bool per_ctx)
{
const u32 * const ctx_bb = (void *)(ce->lrc_reg_state) -
- LRC_STATE_OFFSET + context_wa_bb_offset(ce);
+ LRC_STATE_OFFSET + context_wa_bb_offset(ce) +
+ (per_ctx ? PAGE_SIZE : 0);
if (ctx_bb[CTX_BB_CANARY_INDEX] == ce->lrc_reg_state[CTX_RING_START])
return true;
@@ -1618,21 +1636,21 @@ static bool check_ring_start(struct intel_context *ce)
return false;
}
-static int indirect_ctx_bb_check(struct intel_context *ce)
+static int wabb_ctx_check(struct intel_context *ce, bool per_ctx)
{
int err;
- err = indirect_ctx_submit_req(ce);
+ err = wabb_ctx_submit_req(ce);
if (err)
return err;
- if (!check_ring_start(ce))
+ if (!check_ring_start(ce, per_ctx))
return -EINVAL;
return 0;
}
-static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
+static int __lrc_wabb_ctx(struct intel_engine_cs *engine, bool per_ctx)
{
struct intel_context *a, *b;
int err;
@@ -1667,14 +1685,14 @@ static int __live_lrc_indirect_ctx_bb(struct intel_engine_cs *engine)
* As ring start is restored apriori of starting the indirect ctx bb and
* as it will be different for each context, it fits to this purpose.
*/
- indirect_ctx_bb_setup(a);
- indirect_ctx_bb_setup(b);
+ wabb_ctx_setup(a, per_ctx);
+ wabb_ctx_setup(b, per_ctx);
- err = indirect_ctx_bb_check(a);
+ err = wabb_ctx_check(a, per_ctx);
if (err)
goto unpin_b;
- err = indirect_ctx_bb_check(b);
+ err = wabb_ctx_check(b, per_ctx);
unpin_b:
intel_context_unpin(b);
@@ -1688,7 +1706,7 @@ put_a:
return err;
}
-static int live_lrc_indirect_ctx_bb(void *arg)
+static int lrc_wabb_ctx(void *arg, bool per_ctx)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
@@ -1697,7 +1715,7 @@ static int live_lrc_indirect_ctx_bb(void *arg)
for_each_engine(engine, gt, id) {
intel_engine_pm_get(engine);
- err = __live_lrc_indirect_ctx_bb(engine);
+ err = __lrc_wabb_ctx(engine, per_ctx);
intel_engine_pm_put(engine);
if (igt_flush_test(gt->i915))
@@ -1710,6 +1728,16 @@ static int live_lrc_indirect_ctx_bb(void *arg)
return err;
}
+static int live_lrc_indirect_ctx_bb(void *arg)
+{
+ return lrc_wabb_ctx(arg, false);
+}
+
+static int live_lrc_per_ctx_bb(void *arg)
+{
+ return lrc_wabb_ctx(arg, true);
+}
+
static void garbage_reset(struct intel_engine_cs *engine,
struct i915_request *rq)
{
@@ -1947,6 +1975,7 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_garbage),
SUBTEST(live_pphwsp_runtime),
SUBTEST(live_lrc_indirect_ctx_bb),
+ SUBTEST(live_lrc_per_ctx_bb),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 79aa6ac66a..f40de408cd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -261,11 +261,12 @@ static int igt_atomic_reset(void *arg)
{
struct intel_gt *gt = arg;
const typeof(*igt_atomic_phases) *p;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
@@ -296,7 +297,7 @@ static int igt_atomic_reset(void *arg)
unlock:
igt_global_reset_unlock(gt);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
@@ -307,6 +308,7 @@ static int igt_atomic_engine_reset(void *arg)
const typeof(*igt_atomic_phases) *p;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
int err = 0;
/* Check that the resets are usable from atomic context */
@@ -317,7 +319,7 @@ static int igt_atomic_engine_reset(void *arg)
if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
igt_global_reset_lock(gt);
/* Flush any requests before we get started and check basics */
@@ -365,7 +367,7 @@ static int igt_atomic_engine_reset(void *arg)
out_unlock:
igt_global_reset_unlock(gt);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
return err;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index fb30f733b0..dcef8d4989 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -224,6 +224,7 @@ int live_rps_clock_interval(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err = 0;
if (!intel_rps_is_enabled(rps) || GRAPHICS_VER(gt->i915) < 6)
@@ -236,7 +237,7 @@ int live_rps_clock_interval(void *arg)
saved_work = rps->work.func;
rps->work.func = dummy_rps_work;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
intel_rps_disable(&gt->rps);
intel_gt_check_clock_frequency(gt);
@@ -355,7 +356,7 @@ int live_rps_clock_interval(void *arg)
}
intel_rps_enable(&gt->rps);
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
@@ -376,6 +377,7 @@ int live_rps_control(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
int err = 0;
/*
@@ -398,7 +400,7 @@ int live_rps_control(void *arg)
saved_work = rps->work.func;
rps->work.func = dummy_rps_work;
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
ktime_t min_dt, max_dt;
@@ -488,7 +490,7 @@ int live_rps_control(void *arg)
break;
}
}
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
@@ -1023,6 +1025,7 @@ int live_rps_interrupt(void *arg)
struct intel_engine_cs *engine;
enum intel_engine_id id;
struct igt_spinner spin;
+ intel_wakeref_t wakeref;
u32 pm_events;
int err = 0;
@@ -1033,9 +1036,9 @@ int live_rps_interrupt(void *arg)
if (!intel_rps_has_interrupts(rps) || GRAPHICS_VER(gt->i915) < 6)
return 0;
- intel_gt_pm_get(gt);
- pm_events = rps->pm_events;
- intel_gt_pm_put(gt);
+ pm_events = 0;
+ with_intel_gt_pm(gt, wakeref)
+ pm_events = rps->pm_events;
if (!pm_events) {
pr_err("No RPS PM events registered, but RPS is enabled?\n");
return -ENODEV;
diff --git a/drivers/gpu/drm/i915/gt/selftest_slpc.c b/drivers/gpu/drm/i915/gt/selftest_slpc.c
index 952c8d52d6..302d054029 100644
--- a/drivers/gpu/drm/i915/gt/selftest_slpc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_slpc.c
@@ -266,6 +266,7 @@ static int run_test(struct intel_gt *gt, int test_type)
struct intel_rps *rps = &gt->rps;
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ intel_wakeref_t wakeref;
struct igt_spinner spin;
u32 slpc_min_freq, slpc_max_freq;
int err = 0;
@@ -311,7 +312,7 @@ static int run_test(struct intel_gt *gt, int test_type)
}
intel_gt_pm_wait_for_idle(gt);
- intel_gt_pm_get(gt);
+ wakeref = intel_gt_pm_get(gt);
for_each_engine(engine, gt, id) {
struct i915_request *rq;
u32 max_act_freq;
@@ -397,7 +398,7 @@ static int run_test(struct intel_gt *gt, int test_type)
if (igt_flush_test(gt->i915))
err = -EIO;
- intel_gt_pm_put(gt);
+ intel_gt_pm_put(gt, wakeref);
igt_spinner_fini(&spin);
intel_gt_pm_wait_for_idle(gt);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
index 5f138de3c1..40817ebcca 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_proxy.c
@@ -322,6 +322,7 @@ static int i915_gsc_proxy_component_bind(struct device *i915_kdev,
gsc->proxy.component = data;
gsc->proxy.component->mei_dev = mei_kdev;
mutex_unlock(&gsc->proxy.mutex);
+ gt_dbg(gt, "GSC proxy mei component bound\n");
return 0;
}
@@ -342,6 +343,7 @@ static void i915_gsc_proxy_component_unbind(struct device *i915_kdev,
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
intel_uncore_rmw(gt->uncore, HECI_H_CSR(MTL_GSC_HECI2_BASE),
HECI_H_CSR_IE | HECI_H_CSR_RST, 0);
+ gt_dbg(gt, "GSC proxy mei component unbound\n");
}
static const struct component_ops i915_gsc_proxy_component_ops = {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 3f3df1166b..2b450c43bb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -330,7 +330,7 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc)
static u32 guc_ctl_devid(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
return (INTEL_DEVID(i915) << 16) | INTEL_REVID(i915);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2b6dfe62c8..813cc888e6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -105,61 +105,67 @@ struct intel_guc {
*/
struct {
/**
- * @lock: protects everything in submission_state,
- * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
- * out of zero
+ * @submission_state.lock: protects everything in
+ * submission_state, ce->guc_id.id, and ce->guc_id.ref
+ * when transitioning in and out of zero
*/
spinlock_t lock;
/**
- * @guc_ids: used to allocate new guc_ids, single-lrc
+ * @submission_state.guc_ids: used to allocate new
+ * guc_ids, single-lrc
*/
struct ida guc_ids;
/**
- * @num_guc_ids: Number of guc_ids, selftest feature to be able
- * to reduce this number while testing.
+ * @submission_state.num_guc_ids: Number of guc_ids; a selftest
+ * feature allows reducing this number while testing.
*/
int num_guc_ids;
/**
- * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+ * @submission_state.guc_ids_bitmap: used to allocate
+ * new guc_ids, multi-lrc
*/
unsigned long *guc_ids_bitmap;
/**
- * @guc_id_list: list of intel_context with valid guc_ids but no
- * refs
+ * @submission_state.guc_id_list: list of intel_context
+ * with valid guc_ids but no refs
*/
struct list_head guc_id_list;
/**
- * @guc_ids_in_use: Number single-lrc guc_ids in use
+ * @submission_state.guc_ids_in_use: Number of single-lrc
+ * guc_ids in use
*/
unsigned int guc_ids_in_use;
/**
- * @destroyed_contexts: list of contexts waiting to be destroyed
- * (deregistered with the GuC)
+ * @submission_state.destroyed_contexts: list of contexts
+ * waiting to be destroyed (deregistered with the GuC)
*/
struct list_head destroyed_contexts;
/**
- * @destroyed_worker: worker to deregister contexts, need as we
- * need to take a GT PM reference and can't from destroy
- * function as it might be in an atomic context (no sleeping)
+ * @submission_state.destroyed_worker: worker to deregister
+ * contexts; needed because we must take a GT PM reference and
+ * can't do so from the destroy function, which may run in an
+ * atomic context (no sleeping)
*/
struct work_struct destroyed_worker;
/**
- * @reset_fail_worker: worker to trigger a GT reset after an
- * engine reset fails
+ * @submission_state.reset_fail_worker: worker to trigger
+ * a GT reset after an engine reset fails
*/
struct work_struct reset_fail_worker;
/**
- * @reset_fail_mask: mask of engines that failed to reset
+ * @submission_state.reset_fail_mask: mask of engines that
+ * failed to reset
*/
intel_engine_mask_t reset_fail_mask;
/**
- * @sched_disable_delay_ms: schedule disable delay, in ms, for
- * contexts
+ * @submission_state.sched_disable_delay_ms: schedule
+ * disable delay, in ms, for contexts
*/
unsigned int sched_disable_delay_ms;
/**
- * @sched_disable_gucid_threshold: threshold of min remaining available
- * guc_ids before we start bypassing the schedule disable delay
+ * @submission_state.sched_disable_gucid_threshold:
+ * threshold of min remaining available guc_ids before
+ * we start bypassing the schedule disable delay
*/
unsigned int sched_disable_gucid_threshold;
} submission_state;
@@ -243,37 +249,40 @@ struct intel_guc {
*/
struct {
/**
- * @lock: Lock protecting the below fields and the engine stats.
+ * @timestamp.lock: Lock protecting the below fields and
+ * the engine stats.
*/
spinlock_t lock;
/**
- * @gt_stamp: 64 bit extended value of the GT timestamp.
+ * @timestamp.gt_stamp: 64-bit extended value of the GT
+ * timestamp.
*/
u64 gt_stamp;
/**
- * @ping_delay: Period for polling the GT timestamp for
- * overflow.
+ * @timestamp.ping_delay: Period for polling the GT
+ * timestamp for overflow.
*/
unsigned long ping_delay;
/**
- * @work: Periodic work to adjust GT timestamp, engine and
- * context usage for overflows.
+ * @timestamp.work: Periodic work to adjust GT timestamp,
+ * engine and context usage for overflows.
*/
struct delayed_work work;
/**
- * @shift: Right shift value for the gpm timestamp
+ * @timestamp.shift: Right shift value for the gpm timestamp
*/
u32 shift;
/**
- * @last_stat_jiffies: jiffies at last actual stats collection time
- * We use this timestamp to ensure we don't oversample the
- * stats because runtime power management events can trigger
- * stats collection at much higher rates than required.
+ * @timestamp.last_stat_jiffies: jiffies at last actual
+ * stats collection time. We use this timestamp to ensure
+ * we don't oversample the stats because runtime power
+ * management events can trigger stats collection at much
+ * higher rates than required.
*/
unsigned long last_stat_jiffies;
} timestamp;
@@ -297,6 +306,10 @@ struct intel_guc {
* @number_guc_id_stolen: The number of guc_ids that have been stolen
*/
int number_guc_id_stolen;
+ /**
+ * @fast_response_selftest: Backdoor to CT handler for fast response selftest
+ */
+ u32 fast_response_selftest;
#endif
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
index a4da0208c8..a1cd40d805 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_capture.c
@@ -355,7 +355,7 @@ guc_capture_alloc_steered_lists(struct intel_guc *guc,
static const struct __guc_mmio_reg_descr_group *
guc_capture_get_device_reglist(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
const struct __guc_mmio_reg_descr_group *lists;
if (GRAPHICS_VER(i915) >= 12)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 89e314b375..0d5197c082 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -265,7 +265,7 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
u32 *cmds;
int err;
- err = i915_inject_probe_error(guc_to_gt(guc)->i915, -ENXIO);
+ err = i915_inject_probe_error(guc_to_i915(guc), -ENXIO);
if (err)
return err;
@@ -1076,6 +1076,15 @@ static int ct_handle_response(struct intel_guc_ct *ct, struct ct_incoming_msg *r
found = true;
break;
}
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (!found && ct_to_guc(ct)->fast_response_selftest) {
+ CT_DEBUG(ct, "Assuming unsolicited response due to FAST_REQUEST selftest\n");
+ ct_to_guc(ct)->fast_response_selftest++;
+ found = true;
+ }
+#endif
+
if (!found) {
CT_ERROR(ct, "Unsolicited response message: len %u, data %#x (fence %u, last %u)\n",
len, hxg[0], fence, ct->requests.last_fence);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
index 55bc8b55fb..bf16351c93 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c
@@ -520,7 +520,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log)
static int guc_log_relay_create(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
struct rchan *guc_log_relay_chan;
size_t n_subbufs, subbuf_size;
int ret;
@@ -573,7 +573,7 @@ static void guc_log_relay_destroy(struct intel_guc_log *log)
static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
_guc_log_copy_debuglogs_for_relay(log);
@@ -589,7 +589,7 @@ static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log)
static u32 __get_default_log_level(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
/* A negative value means "use platform/config default" */
if (i915->params.guc_log_level < 0) {
@@ -664,7 +664,7 @@ void intel_guc_log_destroy(struct intel_guc_log *log)
int intel_guc_log_set_level(struct intel_guc_log *log, u32 level)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
intel_wakeref_t wakeref;
int ret = 0;
@@ -796,7 +796,7 @@ void intel_guc_log_relay_flush(struct intel_guc_log *log)
static void guc_log_relay_stop(struct intel_guc_log *log)
{
struct intel_guc *guc = log_to_guc(log);
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
if (!log->relay.started)
return;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
index 1adec6de22..9df7927304 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_rc.c
@@ -14,7 +14,7 @@ static bool __guc_rc_supported(struct intel_guc *guc)
{
/* GuC RC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}
static bool __guc_rc_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
index 2dfb07cc4b..3e681ab6fb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_slpc.c
@@ -34,7 +34,7 @@ static bool __detect_slpc_supported(struct intel_guc *guc)
{
/* GuC SLPC is unavailable for pre-Gen12 */
return guc->submission_supported &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 12;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 12;
}
static bool __guc_slpc_selected(struct intel_guc *guc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 17df71117c..a259f1118c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1107,7 +1107,7 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
if (deregister)
guc_signal_context_fence(ce);
if (destroyed) {
- intel_gt_pm_put_async(guc_to_gt(guc));
+ intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@@ -1303,6 +1303,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
unsigned long flags;
u32 reset_count;
bool in_reset;
+ intel_wakeref_t wakeref;
spin_lock_irqsave(&guc->timestamp.lock, flags);
@@ -1325,7 +1326,8 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
* start_gt_clk is derived from GuC state. To get a consistent
* view of activity, we query the GuC state only if gt is awake.
*/
- if (!in_reset && intel_gt_pm_get_if_awake(gt)) {
+ wakeref = in_reset ? 0 : intel_gt_pm_get_if_awake(gt);
+ if (wakeref) {
stats_saved = *stats;
gt_stamp_saved = guc->timestamp.gt_stamp;
/*
@@ -1334,7 +1336,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now)
*/
guc_update_engine_gt_clks(engine);
guc_update_pm_timestamp(guc, now);
- intel_gt_pm_put_async(gt);
+ intel_gt_pm_put_async(gt, wakeref);
if (i915_reset_count(gpu_error) != reset_count) {
*stats = stats_saved;
guc->timestamp.gt_stamp = gt_stamp_saved;
@@ -3385,9 +3387,9 @@ static void destroyed_worker_func(struct work_struct *w)
struct intel_guc *guc = container_of(w, struct intel_guc,
submission_state.destroyed_worker);
struct intel_gt *gt = guc_to_gt(guc);
- int tmp;
+ intel_wakeref_t wakeref;
- with_intel_gt_pm(gt, tmp)
+ with_intel_gt_pm(gt, wakeref)
deregister_destroyed_contexts(guc);
}
@@ -4624,12 +4626,12 @@ static bool __guc_submission_supported(struct intel_guc *guc)
{
/* GuC submission is unavailable for pre-Gen11 */
return intel_guc_is_supported(guc) &&
- GRAPHICS_VER(guc_to_gt(guc)->i915) >= 11;
+ GRAPHICS_VER(guc_to_i915(guc)) >= 11;
}
static bool __guc_submission_selected(struct intel_guc *guc)
{
- struct drm_i915_private *i915 = guc_to_gt(guc)->i915;
+ struct drm_i915_private *i915 = guc_to_i915(guc);
if (!intel_guc_submission_is_supported(guc))
return false;
@@ -4894,7 +4896,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
intel_context_put(ce);
} else if (context_destroyed(ce)) {
/* Context has been destroyed */
- intel_gt_pm_put_async(guc_to_gt(guc));
+ intel_gt_pm_put_async_untracked(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 27f6561dd7..3872d309ed 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -106,11 +106,6 @@ static void __confirm_options(struct intel_uc *uc)
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
i915->params.enable_guc, "GuC is not supported!");
- if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC &&
- !intel_uc_supports_huc(uc))
- gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
- i915->params.enable_guc, "HuC is not supported!");
-
if (i915->params.enable_guc & ENABLE_GUC_SUBMISSION &&
!intel_uc_supports_guc_submission(uc))
gt_info(gt, "Incompatible option enable_guc=%d - %s\n",
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 362639162e..756093eaf2 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -1343,16 +1343,13 @@ size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
u32 len = min_t(u32, size, PAGE_SIZE - offset);
- void *vaddr;
if (idx > 0) {
idx--;
continue;
}
- vaddr = kmap_atomic(page);
- memcpy(dst, vaddr + offset, len);
- kunmap_atomic(vaddr);
+ memcpy_from_page(dst, page, offset, len);
offset = 0;
dst += len;
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index bfb7214356..c900aac85a 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -286,11 +286,126 @@ err_wakeref:
return ret;
}
+/*
+ * Send a context schedule H2G message with an invalid context id.
+ * This should generate a GUC_RESULT_INVALID_CONTEXT response.
+ */
+static int bad_h2g(struct intel_guc *guc)
+{
+ u32 action[] = {
+ INTEL_GUC_ACTION_SCHED_CONTEXT,
+ 0x12345678,
+ };
+
+ return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0);
+}
+
+/*
+ * Set a spinner running to make sure the system is alive and active,
+ * then send a bad but asynchronous H2G command and wait to see if an
+ * error response is returned. If no response is received or if the
+ * spinner dies then the test will fail.
+ */
+#define FAST_RESPONSE_TIMEOUT_MS 1000
+static int intel_guc_fast_request(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_context *ce;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ intel_wakeref_t wakeref;
+ struct intel_engine_cs *engine = intel_selftest_find_any_engine(gt);
+ bool spinning = false;
+ int ret = 0;
+
+ if (!engine)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ gt_err(gt, "Failed to create spinner request: %pe\n", ce);
+ goto err_pm;
+ }
+
+ ret = igt_spinner_init(&spin, engine->gt);
+ if (ret) {
+ gt_err(gt, "Failed to create spinner: %pe\n", ERR_PTR(ret));
+ goto err_pm;
+ }
+ spinning = true;
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ gt_err(gt, "Failed to create spinner request: %pe\n", rq);
+ goto err_spin;
+ }
+
+ ret = request_add_spin(rq, &spin);
+ if (ret) {
+ gt_err(gt, "Failed to add Spinner request: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ gt->uc.guc.fast_response_selftest = 1;
+
+ ret = bad_h2g(&gt->uc.guc);
+ if (ret) {
+ gt_err(gt, "Failed to send H2G: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ ret = wait_for(gt->uc.guc.fast_response_selftest != 1 || i915_request_completed(rq),
+ FAST_RESPONSE_TIMEOUT_MS);
+ if (ret) {
+ gt_err(gt, "Request wait failed: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+ if (i915_request_completed(rq)) {
+ gt_err(gt, "Spinner died waiting for fast request error!\n");
+ ret = -EIO;
+ goto err_rq;
+ }
+
+ if (gt->uc.guc.fast_response_selftest != 2) {
+ gt_err(gt, "Unexpected fast response count: %u\n",
+ gt->uc.guc.fast_response_selftest);
+ ret = -EIO;
+ goto err_rq;
+ }
+
+ igt_spinner_end(&spin);
+ spinning = false;
+
+ ret = intel_selftest_wait_for_rq(rq);
+ if (ret) {
+ gt_err(gt, "Request failed to complete: %pe\n", ERR_PTR(ret));
+ goto err_rq;
+ }
+
+err_rq:
+ i915_request_put(rq);
+
+err_spin:
+ if (spinning)
+ igt_spinner_end(&spin);
+ igt_spinner_fini(&spin);
+
+err_pm:
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+ return ret;
+}
+
int intel_guc_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(intel_guc_scrub_ctbs),
SUBTEST(intel_guc_steal_guc_ids),
+ SUBTEST(intel_guc_fast_request),
};
struct intel_gt *gt = to_gt(i915);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
index 34b5d952e2..26fdc392fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_hangcheck.c
@@ -74,7 +74,7 @@ static int intel_hang_guc(void *arg)
goto err;
}
- rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
intel_context_put(ce);
if (IS_ERR(rq)) {
ret = PTR_ERR(rq);
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 05f9348b7a..d4a3f3e093 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -3047,7 +3047,7 @@ put_obj:
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
- u32 per_ctx_start[CACHELINE_DWORDS] = {0};
+ u32 per_ctx_start[CACHELINE_DWORDS] = {};
unsigned char *bb_start_sva;
if (!wa_ctx->per_ctx.valid)
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 835c3fde8a..313efdabee 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -56,7 +56,7 @@ static const struct pixel_format bdw_pixel_formats[] = {
{DRM_FORMAT_XBGR8888, 32, "32-bit RGBX (8:8:8:8 MSB-X:B:G:R)"},
/* non-supported format has bpp default to 0 */
- {0, 0, NULL},
+ {}
};
static const struct pixel_format skl_pixel_formats[] = {
@@ -76,7 +76,7 @@ static const struct pixel_format skl_pixel_formats[] = {
{DRM_FORMAT_XRGB2101010, 32, "32-bit BGRX (2:10:10:10 MSB-X:R:G:B)"},
/* non-supported format has bpp default to 0 */
- {0, 0, NULL},
+ {}
};
static int bdw_format_to_drm(int format)
@@ -293,7 +293,7 @@ static const struct cursor_mode_format cursor_pixel_formats[] = {
{DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"},
/* non-supported format has bpp default to 0 */
- {0, 0, 0, 0, NULL},
+ {}
};
static int cursor_mode_to_drm(int mode)
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index d30f8814d9..efcb00472b 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -538,7 +538,7 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
int refclk = vgpu->gvt->gt->i915->display.dpll.ref_clks.nssc;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
- struct dpll clock = {0};
+ struct dpll clock = {};
u32 temp;
/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
@@ -2576,7 +2576,6 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index ddf49c2dbb..2905df83e1 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1211,11 +1211,11 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
for (n = offset >> PAGE_SHIFT; remain; n++) {
int len = min(remain, PAGE_SIZE - x);
- src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+ src = kmap_local_page(i915_gem_object_get_page(src_obj, n));
if (src_needs_clflush)
drm_clflush_virt_range(src + x, len);
memcpy(ptr, src + x, len);
- kunmap_atomic(src);
+ kunmap_local(src);
ptr += len;
remain -= len;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e9b79c2c37..db99c2ef66 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,8 @@
#include <drm/drm_debugfs.h>
+#include "display/intel_display_params.h"
+
#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
@@ -49,6 +51,7 @@
#include "i915_debugfs.h"
#include "i915_debugfs_params.h"
#include "i915_driver.h"
+#include "i915_gpu_error.h"
#include "i915_irq.h"
#include "i915_reg.h"
#include "i915_scheduler.h"
@@ -67,13 +70,13 @@ static int i915_capabilities(struct seq_file *m, void *data)
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(i915));
intel_device_info_print(INTEL_INFO(i915), RUNTIME_INFO(i915), &p);
- intel_display_device_info_print(DISPLAY_INFO(i915), DISPLAY_RUNTIME_INFO(i915), &p);
i915_print_iommu_status(i915, &p);
intel_gt_info_print(&to_gt(i915)->info, &p);
intel_driver_caps_print(&i915->caps, &p);
kernel_param_lock(THIS_MODULE);
i915_params_dump(&i915->params, &p);
+ intel_display_params_dump(i915, &p);
kernel_param_unlock(THIS_MODULE);
return 0;
@@ -297,107 +300,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
return 0;
}
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
- size_t count, loff_t *pos)
-{
- struct i915_gpu_coredump *error;
- ssize_t ret;
- void *buf;
-
- error = file->private_data;
- if (!error)
- return 0;
-
- /* Bounce buffer required because of kernfs __user API convenience. */
- buf = kmalloc(count, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
- if (ret <= 0)
- goto out;
-
- if (!copy_to_user(ubuf, buf, ret))
- *pos += ret;
- else
- ret = -EFAULT;
-
-out:
- kfree(buf);
- return ret;
-}
-
-static int gpu_state_release(struct inode *inode, struct file *file)
-{
- i915_gpu_coredump_put(file->private_data);
- return 0;
-}
-
-static int i915_gpu_info_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *i915 = inode->i_private;
- struct i915_gpu_coredump *gpu;
- intel_wakeref_t wakeref;
-
- gpu = NULL;
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
-
- if (IS_ERR(gpu))
- return PTR_ERR(gpu);
-
- file->private_data = gpu;
- return 0;
-}
-
-static const struct file_operations i915_gpu_info_fops = {
- .owner = THIS_MODULE,
- .open = i915_gpu_info_open,
- .read = gpu_state_read,
- .llseek = default_llseek,
- .release = gpu_state_release,
-};
-
-static ssize_t
-i915_error_state_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt,
- loff_t *ppos)
-{
- struct i915_gpu_coredump *error = filp->private_data;
-
- if (!error)
- return 0;
-
- drm_dbg(&error->i915->drm, "Resetting error state\n");
- i915_reset_error_state(error->i915);
-
- return cnt;
-}
-
-static int i915_error_state_open(struct inode *inode, struct file *file)
-{
- struct i915_gpu_coredump *error;
-
- error = i915_first_error_state(inode->i_private);
- if (IS_ERR(error))
- return PTR_ERR(error);
-
- file->private_data = error;
- return 0;
-}
-
-static const struct file_operations i915_error_state_fops = {
- .owner = THIS_MODULE,
- .open = i915_error_state_open,
- .read = gpu_state_read,
- .write = i915_error_state_write,
- .llseek = default_llseek,
- .release = gpu_state_release,
-};
-#endif
-
static int i915_frequency_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -837,10 +739,6 @@ static const struct i915_debugfs_files {
{"i915_perf_noa_delay", &i915_perf_noa_delay_fops},
{"i915_wedged", &i915_wedged_fops},
{"i915_gem_drop_caches", &i915_drop_caches_fops},
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
- {"i915_error_state", &i915_error_state_fops},
- {"i915_gpu_info", &i915_gpu_info_fops},
-#endif
};
void i915_debugfs_register(struct drm_i915_private *dev_priv)
@@ -863,4 +761,6 @@ void i915_debugfs_register(struct drm_i915_private *dev_priv)
drm_debugfs_create_files(i915_debugfs_list,
ARRAY_SIZE(i915_debugfs_list),
minor->debugfs_root, minor);
+
+ i915_gpu_error_debugfs_register(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index 802de2c6de..9967148aed 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -231,16 +231,10 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
- mutex_init(&dev_priv->display.backlight.lock);
mutex_init(&dev_priv->sb_lock);
cpu_latency_qos_add_request(&dev_priv->sb_qos, PM_QOS_DEFAULT_VALUE);
- mutex_init(&dev_priv->display.audio.mutex);
- mutex_init(&dev_priv->display.wm.wm_mutex);
- mutex_init(&dev_priv->display.pps.mutex);
- mutex_init(&dev_priv->display.hdcp.hdcp_mutex);
-
i915_memcpy_init_early(dev_priv);
intel_runtime_pm_init_early(&dev_priv->runtime_pm);
@@ -804,7 +798,9 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_cleanup_modeset2;
- intel_pxp_init(i915);
+ ret = intel_pxp_init(i915);
+ if (ret && ret != -ENODEV)
+ drm_dbg(&i915->drm, "pxp init failed with %d\n", ret);
ret = intel_display_driver_probe(i915);
if (ret)
@@ -907,6 +903,8 @@ static void i915_driver_release(struct drm_device *dev)
intel_runtime_pm_driver_release(rpm);
i915_driver_late_release(dev_priv);
+
+ intel_display_device_remove(dev_priv);
}
static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
@@ -1037,7 +1035,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915)
intel_power_domains_driver_remove(i915);
enable_rpm_wakeref_asserts(&i915->runtime_pm);
- intel_runtime_pm_driver_release(&i915->runtime_pm);
+ intel_runtime_pm_driver_last_release(&i915->runtime_pm);
}
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c
index 2a44b3876c..fa6852713b 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.c
+++ b/drivers/gpu/drm/i915/i915_drm_client.c
@@ -28,6 +28,10 @@ struct i915_drm_client *i915_drm_client_alloc(void)
kref_init(&client->kref);
spin_lock_init(&client->ctx_lock);
INIT_LIST_HEAD(&client->ctx_list);
+#ifdef CONFIG_PROC_FS
+ spin_lock_init(&client->objects_lock);
+ INIT_LIST_HEAD(&client->objects_list);
+#endif
return client;
}
@@ -41,6 +45,68 @@ void __i915_drm_client_free(struct kref *kref)
}
#ifdef CONFIG_PROC_FS
+static void
+obj_meminfo(struct drm_i915_gem_object *obj,
+ struct drm_memory_stats stats[INTEL_REGION_UNKNOWN])
+{
+ const enum intel_region_id id = obj->mm.region ?
+ obj->mm.region->id : INTEL_REGION_SMEM;
+ const u64 sz = obj->base.size;
+
+ if (obj->base.handle_count > 1)
+ stats[id].shared += sz;
+ else
+ stats[id].private += sz;
+
+ if (i915_gem_object_has_pages(obj)) {
+ stats[id].resident += sz;
+
+ if (!dma_resv_test_signaled(obj->base.resv,
+ DMA_RESV_USAGE_BOOKKEEP))
+ stats[id].active += sz;
+ else if (i915_gem_object_is_shrinkable(obj) &&
+ obj->mm.madv == I915_MADV_DONTNEED)
+ stats[id].purgeable += sz;
+ }
+}
+
+static void show_meminfo(struct drm_printer *p, struct drm_file *file)
+{
+ struct drm_memory_stats stats[INTEL_REGION_UNKNOWN] = {};
+ struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_drm_client *client = fpriv->client;
+ struct drm_i915_private *i915 = fpriv->i915;
+ struct drm_i915_gem_object *obj;
+ struct intel_memory_region *mr;
+ struct list_head __rcu *pos;
+ unsigned int id;
+
+ /* Public objects. */
+ spin_lock(&file->table_lock);
+ idr_for_each_entry(&file->object_idr, obj, id)
+ obj_meminfo(obj, stats);
+ spin_unlock(&file->table_lock);
+
+ /* Internal objects. */
+ rcu_read_lock();
+ list_for_each_rcu(pos, &client->objects_list) {
+ obj = i915_gem_object_get_rcu(list_entry(pos, typeof(*obj),
+ client_link));
+ if (!obj)
+ continue;
+ obj_meminfo(obj, stats);
+ i915_gem_object_put(obj);
+ }
+ rcu_read_unlock();
+
+ for_each_memory_region(mr, i915, id)
+ drm_print_memory_stats(p,
+ &stats[id],
+ DRM_GEM_OBJECT_RESIDENT |
+ DRM_GEM_OBJECT_PURGEABLE,
+ mr->uabi_name);
+}
+
static const char * const uabi_class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "render",
[I915_ENGINE_CLASS_COPY] = "copy",
@@ -102,10 +168,52 @@ void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file)
* ******************************************************************
*/
+ show_meminfo(p, file);
+
if (GRAPHICS_VER(i915) < 8)
return;
for (i = 0; i < ARRAY_SIZE(uabi_class_names); i++)
show_client_class(p, i915, file_priv->client, i);
}
+
+void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj)
+{
+ unsigned long flags;
+
+ GEM_WARN_ON(obj->client);
+ GEM_WARN_ON(!list_empty(&obj->client_link));
+
+ spin_lock_irqsave(&client->objects_lock, flags);
+ obj->client = i915_drm_client_get(client);
+ list_add_tail_rcu(&obj->client_link, &client->objects_list);
+ spin_unlock_irqrestore(&client->objects_lock, flags);
+}
+
+void i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
+{
+ struct i915_drm_client *client = fetch_and_zero(&obj->client);
+ unsigned long flags;
+
+ /* Object may not be associated with a client. */
+ if (!client)
+ return;
+
+ spin_lock_irqsave(&client->objects_lock, flags);
+ list_del_rcu(&obj->client_link);
+ spin_unlock_irqrestore(&client->objects_lock, flags);
+
+ i915_drm_client_put(client);
+}
+
+void i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce)
+{
+ if (ce->state)
+ i915_drm_client_add_object(client, ce->state->obj);
+
+ if (ce->ring != ce->engine->legacy.ring && ce->ring->vma)
+ i915_drm_client_add_object(client, ce->ring->vma->obj);
+}
#endif
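show_meminfo() above exports per-memory-region usage through the DRM fdinfo interface, walking both the public handle table and the client's internal object list. A minimal userspace reader is sketched below; it assumes only the standard /proc/<pid>/fdinfo layout and that the keys of interest start with "drm-" (the exact key names come from drm_print_memory_stats and the per-region uabi_name, and /dev/dri/card0 is an assumption).

/*
 * Minimal fdinfo reader: open a DRM device node and dump the "drm-*"
 * key/value lines the kernel exposes for that file descriptor.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char path[64], line[256];
	FILE *f;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/dri/card0");
		return 1;
	}

	snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
	f = fopen(path, "r");
	if (!f) {
		perror("fopen fdinfo");
		close(fd);
		return 1;
	}

	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "drm-", 4))
			fputs(line, stdout);

	fclose(f);
	close(fd);
	return 0;
}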
diff --git a/drivers/gpu/drm/i915/i915_drm_client.h b/drivers/gpu/drm/i915/i915_drm_client.h
index 67816c912b..a439dd7899 100644
--- a/drivers/gpu/drm/i915/i915_drm_client.h
+++ b/drivers/gpu/drm/i915/i915_drm_client.h
@@ -12,6 +12,10 @@
#include <uapi/drm/i915_drm.h>
+#include "i915_file_private.h"
+#include "gem/i915_gem_object_types.h"
+#include "gt/intel_context_types.h"
+
#define I915_LAST_UABI_ENGINE_CLASS I915_ENGINE_CLASS_COMPUTE
struct drm_file;
@@ -25,6 +29,20 @@ struct i915_drm_client {
spinlock_t ctx_lock; /* For add/remove from ctx_list. */
struct list_head ctx_list; /* List of contexts belonging to client. */
+#ifdef CONFIG_PROC_FS
+ /**
+ * @objects_lock: lock protecting @objects_list
+ */
+ spinlock_t objects_lock;
+
+ /**
+ * @objects_list: list of objects created by this client
+ *
+ * Protected by @objects_lock.
+ */
+ struct list_head objects_list;
+#endif
+
/**
* @past_runtime: Accumulation of pphwsp runtimes from closed contexts.
*/
@@ -49,4 +67,28 @@ struct i915_drm_client *i915_drm_client_alloc(void);
void i915_drm_client_fdinfo(struct drm_printer *p, struct drm_file *file);
+#ifdef CONFIG_PROC_FS
+void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj);
+void i915_drm_client_remove_object(struct drm_i915_gem_object *obj);
+void i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce);
+#else
+static inline void i915_drm_client_add_object(struct i915_drm_client *client,
+ struct drm_i915_gem_object *obj)
+{
+}
+
+static inline void
+i915_drm_client_remove_object(struct drm_i915_gem_object *obj)
+{
+}
+
+static inline void
+i915_drm_client_add_context_objects(struct i915_drm_client *client,
+ struct intel_context *ce)
+{
+}
+#endif
+
#endif /* !__I915_DRM_CLIENT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index dd452c220d..861567362a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -396,20 +396,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
return i915->gt[0];
}
-/* Simple iterator over all initialised engines */
-#define for_each_engine(engine__, gt__, id__) \
- for ((id__) = 0; \
- (id__) < I915_NUM_ENGINES; \
- (id__)++) \
- for_each_if ((engine__) = (gt__)->engine[(id__)])
-
-/* Iterator over subset of engines selected by mask */
-#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \
- for ((tmp__) = (mask__) & (gt__)->info.engine_mask; \
- (tmp__) ? \
- ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \
- 0;)
-
#define rb_to_uabi_engine(rb) \
rb_entry_safe(rb, struct intel_engine_cs, uabi_node)
@@ -418,11 +404,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
(engine__); \
(engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-#define for_each_uabi_class_engine(engine__, class__, i915__) \
- for ((engine__) = intel_engine_lookup_user((i915__), (class__), 0); \
- (engine__) && (engine__)->uabi_class == (class__); \
- (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node)))
-
#define INTEL_INFO(i915) ((i915)->__info)
#define RUNTIME_INFO(i915) (&(i915)->__runtime)
#define DRIVER_CAPS(i915) (&(i915)->caps)
@@ -575,6 +556,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define IS_DG2(i915) IS_PLATFORM(i915, INTEL_DG2)
#define IS_PONTEVECCHIO(i915) IS_PLATFORM(i915, INTEL_PONTEVECCHIO)
#define IS_METEORLAKE(i915) IS_PLATFORM(i915, INTEL_METEORLAKE)
+#define IS_LUNARLAKE(i915) 0
#define IS_DG2_G10(i915) \
IS_SUBPLATFORM(i915, INTEL_DG2, INTEL_SUBPLATFORM_G10)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c166ad5e18..92758b6b41 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1306,8 +1306,6 @@ void i915_gem_init_early(struct drm_i915_private *dev_priv)
{
i915_gem_init__mm(dev_priv);
i915_gem_init__contexts(dev_priv);
-
- spin_lock_init(&dev_priv->display.fb_tracking.lock);
}
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index b4e31e59c7..d04660b600 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -57,6 +57,7 @@
#include "i915_memcpy.h"
#include "i915_reg.h"
#include "i915_scatterlist.h"
+#include "i915_sysfs.h"
#include "i915_utils.h"
#define ALLOW_FAIL (__GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
@@ -520,7 +521,7 @@ __find_vma(struct i915_vma_coredump *vma, const char *name)
return NULL;
}
-struct i915_vma_coredump *
+static struct i915_vma_coredump *
intel_gpu_error_find_batch(const struct intel_engine_coredump *ee)
{
return __find_vma(ee->vma, "batch");
@@ -609,9 +610,9 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
- const struct intel_engine_cs *engine,
- const struct i915_vma_coredump *vma)
+static void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
+ const struct intel_engine_cs *engine,
+ const struct i915_vma_coredump *vma)
{
char out[ASCII85_BUFSZ];
struct page *page;
@@ -660,6 +661,7 @@ static void err_print_params(struct drm_i915_error_state_buf *m,
struct drm_printer p = i915_error_printer(m);
i915_params_dump(params, &p);
+ intel_display_params_dump(m->i915, &p);
}
static void err_print_pciid(struct drm_i915_error_state_buf *m,
@@ -1027,6 +1029,7 @@ static void i915_vma_coredump_free(struct i915_vma_coredump *vma)
static void cleanup_params(struct i915_gpu_coredump *error)
{
i915_params_free(&error->params);
+ intel_display_params_free(&error->display_params);
}
static void cleanup_uc(struct intel_uc_coredump *uc)
@@ -1988,6 +1991,7 @@ static void capture_gen(struct i915_gpu_coredump *error)
error->suspend_count = i915->suspend_count;
i915_params_copy(&error->params, &i915->params);
+ intel_display_params_copy(&error->display_params);
memcpy(&error->device_info,
INTEL_INFO(i915),
sizeof(error->device_info));
@@ -2137,7 +2141,7 @@ __i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 du
return error;
}
-struct i915_gpu_coredump *
+static struct i915_gpu_coredump *
i915_gpu_coredump(struct intel_gt *gt, intel_engine_mask_t engine_mask, u32 dump_flags)
{
static DEFINE_MUTEX(capture_mutex);
@@ -2174,7 +2178,7 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
- pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
+ pr_info("Please see https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html for details.\n");
pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
@@ -2208,7 +2212,7 @@ void i915_capture_error_state(struct intel_gt *gt,
i915_gpu_coredump_put(error);
}
-struct i915_gpu_coredump *
+static struct i915_gpu_coredump *
i915_first_error_state(struct drm_i915_private *i915)
{
struct i915_gpu_coredump *error;
@@ -2375,3 +2379,184 @@ void intel_klog_error_capture(struct intel_gt *gt,
drm_info(&i915->drm, "[Capture/%d.%d] Dumped %zd bytes\n", l_count, line++, pos_err);
}
#endif
+
+static ssize_t gpu_state_read(struct file *file, char __user *ubuf,
+ size_t count, loff_t *pos)
+{
+ struct i915_gpu_coredump *error;
+ ssize_t ret;
+ void *buf;
+
+ error = file->private_data;
+ if (!error)
+ return 0;
+
+ /* Bounce buffer required because of kernfs __user API convenience. */
+ buf = kmalloc(count, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = i915_gpu_coredump_copy_to_buffer(error, buf, *pos, count);
+ if (ret <= 0)
+ goto out;
+
+ if (!copy_to_user(ubuf, buf, ret))
+ *pos += ret;
+ else
+ ret = -EFAULT;
+
+out:
+ kfree(buf);
+ return ret;
+}
+
+static int gpu_state_release(struct inode *inode, struct file *file)
+{
+ i915_gpu_coredump_put(file->private_data);
+ return 0;
+}
+
+static int i915_gpu_info_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *i915 = inode->i_private;
+ struct i915_gpu_coredump *gpu;
+ intel_wakeref_t wakeref;
+
+ gpu = NULL;
+ with_intel_runtime_pm(&i915->runtime_pm, wakeref)
+ gpu = i915_gpu_coredump(to_gt(i915), ALL_ENGINES, CORE_DUMP_FLAG_NONE);
+
+ if (IS_ERR(gpu))
+ return PTR_ERR(gpu);
+
+ file->private_data = gpu;
+ return 0;
+}
+
+static const struct file_operations i915_gpu_info_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_gpu_info_open,
+ .read = gpu_state_read,
+ .llseek = default_llseek,
+ .release = gpu_state_release,
+};
+
+static ssize_t
+i915_error_state_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt,
+ loff_t *ppos)
+{
+ struct i915_gpu_coredump *error = filp->private_data;
+
+ if (!error)
+ return 0;
+
+ drm_dbg(&error->i915->drm, "Resetting error state\n");
+ i915_reset_error_state(error->i915);
+
+ return cnt;
+}
+
+static int i915_error_state_open(struct inode *inode, struct file *file)
+{
+ struct i915_gpu_coredump *error;
+
+ error = i915_first_error_state(inode->i_private);
+ if (IS_ERR(error))
+ return PTR_ERR(error);
+
+ file->private_data = error;
+ return 0;
+}
+
+static const struct file_operations i915_error_state_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_error_state_open,
+ .read = gpu_state_read,
+ .write = i915_error_state_write,
+ .llseek = default_llseek,
+ .release = gpu_state_release,
+};
+
+void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+
+ debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915,
+ &i915_error_state_fops);
+ debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915,
+ &i915_gpu_info_fops);
+}
+
+static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+
+ struct device *kdev = kobj_to_dev(kobj);
+ struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
+ struct i915_gpu_coredump *gpu;
+ ssize_t ret = 0;
+
+ /*
+ * FIXME: Concurrent clients triggering resets and reading + clearing
+ * dumps can cause inconsistent sysfs reads when a user calls in with a
+ * non-zero offset to complete a prior partial read but the
+ * gpu_coredump has been cleared or replaced.
+ */
+
+ gpu = i915_first_error_state(i915);
+ if (IS_ERR(gpu)) {
+ ret = PTR_ERR(gpu);
+ } else if (gpu) {
+ ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
+ i915_gpu_coredump_put(gpu);
+ } else {
+ const char *str = "No error state collected\n";
+ size_t len = strlen(str);
+
+ if (off < len) {
+ ret = min_t(size_t, count, len - off);
+ memcpy(buf, str + off, ret);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t error_state_write(struct file *file, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *kdev = kobj_to_dev(kobj);
+ struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
+
+ drm_dbg(&dev_priv->drm, "Resetting error state\n");
+ i915_reset_error_state(dev_priv);
+
+ return count;
+}
+
+static const struct bin_attribute error_state_attr = {
+ .attr.name = "error",
+ .attr.mode = S_IRUSR | S_IWUSR,
+ .size = 0,
+ .read = error_state_read,
+ .write = error_state_write,
+};
+
+void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
+{
+ struct device *kdev = i915->drm.primary->kdev;
+
+ if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
+ drm_err(&i915->drm, "error_state sysfs setup failed\n");
+}
+
+void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
+{
+ struct device *kdev = i915->drm.primary->kdev;
+
+ sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
+}
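With the sysfs handlers now living in i915_gpu_error.c, the user-visible flow is unchanged: reading /sys/class/drm/cardN/error returns the first captured dump (or "No error state collected"), and writing anything to the node clears it. A small reader/clearer is sketched below, assuming card index 0 and sufficient privileges.

/*
 * Read the GPU error dump from sysfs and then clear it.
 * Path and semantics follow the error_state_read/_write handlers above.
 */
#include <stdio.h>

int main(void)
{
	const char *node = "/sys/class/drm/card0/error";
	char buf[4096];
	size_t n;
	FILE *f;

	f = fopen(node, "r");
	if (!f) {
		perror("fopen error node");
		return 1;
	}
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);

	f = fopen(node, "w");
	if (f) {
		fputs("clear\n", f);	/* any write resets the error state */
		fclose(f);
	}
	return 0;
}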
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 48f6c00402..7c255bb1c3 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -15,6 +15,7 @@
#include <drm/drm_mm.h>
#include "display/intel_display_device.h"
+#include "display/intel_display_params.h"
#include "gt/intel_engine.h"
#include "gt/intel_engine_types.h"
#include "gt/intel_gt_types.h"
@@ -215,6 +216,7 @@ struct i915_gpu_coredump {
struct intel_display_runtime_info display_runtime_info;
struct intel_driver_caps driver_caps;
struct i915_params params;
+ struct intel_display_params display_params;
struct intel_overlay_error_state *overlay;
@@ -283,14 +285,7 @@ static inline void intel_klog_error_capture(struct intel_gt *gt,
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
-void intel_gpu_error_print_vma(struct drm_i915_error_state_buf *m,
- const struct intel_engine_cs *engine,
- const struct i915_vma_coredump *vma);
-struct i915_vma_coredump *
-intel_gpu_error_find_batch(const struct intel_engine_coredump *ee);
-
-struct i915_gpu_coredump *i915_gpu_coredump(struct intel_gt *gt,
- intel_engine_mask_t engine_mask, u32 dump_flags);
+
void i915_capture_error_state(struct intel_gt *gt,
intel_engine_mask_t engine_mask, u32 dump_flags);
@@ -338,10 +333,13 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
kref_put(&gpu->ref, __i915_gpu_coredump_free);
}
-struct i915_gpu_coredump *i915_first_error_state(struct drm_i915_private *i915);
void i915_reset_error_state(struct drm_i915_private *i915);
void i915_disable_error_state(struct drm_i915_private *i915, int err);
+void i915_gpu_error_debugfs_register(struct drm_i915_private *i915);
+void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915);
+void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915);
+
#else
__printf(2, 3)
@@ -409,12 +407,6 @@ static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}
-static inline struct i915_gpu_coredump *
-i915_first_error_state(struct drm_i915_private *i915)
-{
- return ERR_PTR(-ENODEV);
-}
-
static inline void i915_reset_error_state(struct drm_i915_private *i915)
{
}
@@ -424,6 +416,18 @@ static inline void i915_disable_error_state(struct drm_i915_private *i915,
{
}
+static inline void i915_gpu_error_debugfs_register(struct drm_i915_private *i915)
+{
+}
+
+static inline void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915)
+{
+}
+
+static inline void i915_gpu_error_sysfs_teardown(struct drm_i915_private *i915)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) */
#endif /* _I915_GPU_ERROR_H_ */
diff --git a/drivers/gpu/drm/i915/i915_memcpy.c b/drivers/gpu/drm/i915/i915_memcpy.c
index 1b021a4902..ba82277254 100644
--- a/drivers/gpu/drm/i915/i915_memcpy.c
+++ b/drivers/gpu/drm/i915/i915_memcpy.c
@@ -23,6 +23,8 @@
*/
#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/cpufeature.h>
#include <asm/fpu/api.h>
#include "i915_memcpy.h"
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 036c4c3ed6..de43048543 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -67,33 +67,9 @@ i915_param_named(modeset, int, 0400,
"Use kernel modesetting [KMS] (0=disable, "
"1=on, -1=force vga console preference [default])");
-i915_param_named_unsafe(enable_dc, int, 0400,
- "Enable power-saving display C-states. "
- "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; "
- "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)");
-
-i915_param_named_unsafe(enable_fbc, int, 0400,
- "Enable frame buffer compression for power savings "
- "(default: -1 (use per-chip default))");
-
-i915_param_named_unsafe(lvds_channel_mode, int, 0400,
- "Specify LVDS channel mode "
- "(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
-
-i915_param_named_unsafe(panel_use_ssc, int, 0400,
- "Use Spread Spectrum Clock with panels [LVDS/eDP] "
- "(default: auto from VBT)");
-
-i915_param_named_unsafe(vbt_sdvo_panel_type, int, 0400,
- "Override/Ignore selection of SDVO panel mode in the VBT "
- "(-2=ignore, -1=auto [default], index in VBT BIOS table)");
-
i915_param_named_unsafe(reset, uint, 0400,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
-i915_param_named_unsafe(vbt_firmware, charp, 0400,
- "Load VBT from specified file under /lib/firmware");
-
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
i915_param_named(error_capture, bool, 0400,
"Record the GPU state following a hang. "
@@ -106,55 +82,10 @@ i915_param_named_unsafe(enable_hangcheck, bool, 0400,
"WARNING: Disabling this can cause system wide hangs. "
"(default: true)");
-i915_param_named_unsafe(enable_psr, int, 0400,
- "Enable PSR "
- "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) "
- "Default: -1 (use per-chip default)");
-
-i915_param_named(psr_safest_params, bool, 0400,
- "Replace PSR VBT parameters by the safest and not optimal ones. This "
- "is helpful to detect if PSR issues are related to bad values set in "
- " VBT. (0=use VBT parameters, 1=use safest parameters)");
-
-i915_param_named_unsafe(enable_psr2_sel_fetch, bool, 0400,
- "Enable PSR2 selective fetch "
- "(0=disabled, 1=enabled) "
- "Default: 0");
-
-i915_param_named_unsafe(enable_sagv, bool, 0600,
- "Enable system agent voltage/frequency scaling (SAGV) (default: true)");
-
i915_param_named_unsafe(force_probe, charp, 0400,
"Force probe options for specified supported devices. "
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
-i915_param_named_unsafe(disable_power_well, int, 0400,
- "Disable display power wells when possible "
- "(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
-
-i915_param_named_unsafe(enable_ips, int, 0400, "Enable IPS (default: true)");
-
-i915_param_named_unsafe(enable_dpt, bool, 0400,
- "Enable display page table (DPT) (default: true)");
-
-i915_param_named_unsafe(load_detect_test, bool, 0400,
- "Force-enable the VGA load detect code for testing (default:false). "
- "For developers only.");
-
-i915_param_named_unsafe(force_reset_modeset_test, bool, 0400,
- "Force a modeset during gpu reset for testing (default:false). "
- "For developers only.");
-
-i915_param_named_unsafe(invert_brightness, int, 0400,
- "Invert backlight brightness "
- "(-1 force normal, 0 machine defaults, 1 force inversion), please "
- "report PCI device ID, subsystem vendor and subsystem device ID "
- "to dri-devel@lists.freedesktop.org, if your machine needs it. "
- "It will then be included in an upcoming module version.");
-
-i915_param_named(disable_display, bool, 0400,
- "Disable display (default: false)");
-
i915_param_named(memtest, bool, 0400,
"Perform a read/write test of all device memory on module load (default: off)");
@@ -162,19 +93,6 @@ i915_param_named(mmio_debug, int, 0400,
"Enable the MMIO debug code for the first N failures (default: off). "
"This may negatively affect performance.");
-/* Special case writable file */
-i915_param_named(verbose_state_checks, bool, 0600,
- "Enable verbose logs (ie. WARN_ON()) in case of unexpected hw state conditions.");
-
-i915_param_named_unsafe(nuclear_pageflip, bool, 0400,
- "Force enable atomic functionality on platforms that don't have full support yet.");
-
-/* WA to get away with the default setting in VBT for early platforms.Will be removed */
-i915_param_named_unsafe(edp_vswing, int, 0400,
- "Ignore/Override vswing pre-emph table selection from VBT "
- "(0=use value from vbt [default], 1=low power swing(200mV),"
- "2=default swing(400mV))");
-
i915_param_named_unsafe(enable_guc, int, 0400,
"Enable GuC load for GuC submission and/or HuC load. "
"Required functionality can be selected using bitmask values. "
@@ -196,18 +114,11 @@ i915_param_named_unsafe(dmc_firmware_path, charp, 0400,
i915_param_named_unsafe(gsc_firmware_path, charp, 0400,
"GSC firmware path to use instead of the default one");
-i915_param_named_unsafe(enable_dp_mst, bool, 0400,
- "Enable multi-stream transport (MST) for new DisplayPort sinks. (default: true)");
-
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
i915_param_named_unsafe(inject_probe_failure, uint, 0400,
"Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
#endif
-i915_param_named(enable_dpcd_backlight, int, 0400,
- "Enable support for DPCD backlight control"
- "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enable, 2=force VESA interface, 3=force Intel interface)");
-
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
"Enable support for Intel GVT-g graphics virtualization host support(default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index d5194b039a..1315d7fac8 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -46,21 +46,7 @@ struct drm_printer;
* debugfs file
*/
#define I915_PARAMS_FOR_EACH(param) \
- param(char *, vbt_firmware, NULL, 0400) \
param(int, modeset, -1, 0400) \
- param(int, lvds_channel_mode, 0, 0400) \
- param(int, panel_use_ssc, -1, 0600) \
- param(int, vbt_sdvo_panel_type, -1, 0400) \
- param(int, enable_dc, -1, 0400) \
- param(int, enable_fbc, -1, 0600) \
- param(int, enable_psr, -1, 0600) \
- param(bool, enable_dpt, true, 0400) \
- param(bool, psr_safest_params, false, 0400) \
- param(bool, enable_psr2_sel_fetch, true, 0400) \
- param(bool, enable_sagv, true, 0600) \
- param(int, disable_power_well, -1, 0400) \
- param(int, enable_ips, 1, 0600) \
- param(int, invert_brightness, 0, 0600) \
param(int, enable_guc, -1, 0400) \
param(int, guc_log_level, -1, 0400) \
param(char *, guc_firmware_path, NULL, 0400) \
@@ -69,23 +55,15 @@ struct drm_printer;
param(char *, gsc_firmware_path, NULL, 0400) \
param(bool, memtest, false, 0400) \
param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
- param(int, edp_vswing, 0, 0400) \
param(unsigned int, reset, 3, 0600) \
param(unsigned int, inject_probe_failure, 0, 0) \
- param(int, enable_dpcd_backlight, -1, 0600) \
param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
param(unsigned int, request_timeout_ms, CONFIG_DRM_I915_REQUEST_TIMEOUT, CONFIG_DRM_I915_REQUEST_TIMEOUT ? 0600 : 0) \
param(unsigned int, lmem_size, 0, 0400) \
param(unsigned int, lmem_bar_size, 0, 0400) \
/* leave bools at the end to not create holes */ \
param(bool, enable_hangcheck, true, 0600) \
- param(bool, load_detect_test, false, 0600) \
- param(bool, force_reset_modeset_test, false, 0600) \
param(bool, error_capture, true, IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) ? 0600 : 0) \
- param(bool, disable_display, false, 0400) \
- param(bool, verbose_state_checks, true, 0) \
- param(bool, nuclear_pageflip, false, 0400) \
- param(bool, enable_dp_mst, true, 0600) \
param(bool, enable_gvt, false, IS_ENABLED(CONFIG_DRM_I915_GVT) ? 0400 : 0)
#define MEMBER(T, member, ...) T member;
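The parameter table above is an X-macro: I915_PARAMS_FOR_EACH() is expanded once with MEMBER() to declare the struct fields and again elsewhere to generate the module_param wiring and dump code, so deleting a param(...) row (as the display parameters move out) removes every generated artifact at once. A self-contained illustration of the technique follows, with invented parameters and a three-argument table rather than the driver's four-argument one.

/*
 * Standalone X-macro sketch of the I915_PARAMS_FOR_EACH pattern:
 * one table drives the struct definition, the initializers and the dump code.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PARAMS_FOR_EACH(param) \
	param(int,  modeset, -1)    \
	param(bool, memtest, false)

struct demo_params {
#define MEMBER(T, name, ...) T name;
	DEMO_PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};

static void demo_params_dump(const struct demo_params *p)
{
#define DUMP(T, name, ...) printf(#name "=%d\n", (int)p->name);
	DEMO_PARAMS_FOR_EACH(DUMP)
#undef DUMP
}

int main(void)
{
	struct demo_params p = {
#define INITIAL(T, name, value) .name = (value),
		DEMO_PARAMS_FOR_EACH(INITIAL)
#undef INITIAL
	};

	demo_params_dump(&p);
	return 0;
}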
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2d695818f0..bd9d812b1a 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -3225,7 +3225,7 @@ u32 i915_perf_oa_timestamp_frequency(struct drm_i915_private *i915)
struct intel_gt *gt = to_gt(i915);
/* Wa_18013179988 */
- if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
+ if (IS_DG2(i915) || IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 74))) {
intel_wakeref_t wakeref;
u32 reg, shift;
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 13b1ae9b96..46445248d1 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -291,7 +291,8 @@ struct i915_perf_stream {
int size_exponent;
/**
- * @ptr_lock: Locks reads and writes to all head/tail state
+ * @oa_buffer.ptr_lock: Locks reads and writes to all
+ * head/tail state
*
* Consider: the head and tail pointer state needs to be read
* consistently from a hrtimer callback (atomic context) and
@@ -313,7 +314,8 @@ struct i915_perf_stream {
spinlock_t ptr_lock;
/**
- * @head: Although we can always read back the head pointer register,
+ * @oa_buffer.head: Although we can always read back
+ * the head pointer register,
* we prefer to avoid trusting the HW state, just to avoid any
	 * risk that some hardware condition could somehow bump the
* head pointer unpredictably and cause us to forward the wrong
@@ -322,7 +324,8 @@ struct i915_perf_stream {
u32 head;
/**
- * @tail: The last verified tail that can be read by userspace.
+ * @oa_buffer.tail: The last verified tail that can be
+ * read by userspace.
*/
u32 tail;
} oa_buffer;
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index f861863eb7..21eb0c5b32 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -31,6 +31,16 @@
static cpumask_t i915_pmu_cpumask;
static unsigned int i915_pmu_target_cpu = -1;
+static struct i915_pmu *event_to_pmu(struct perf_event *event)
+{
+ return container_of(event->pmu, struct i915_pmu, base);
+}
+
+static struct drm_i915_private *pmu_to_i915(struct i915_pmu *pmu)
+{
+ return container_of(pmu, struct drm_i915_private, pmu);
+}
+
static u8 engine_config_sample(u64 config)
{
return config & I915_PMU_SAMPLE_MASK;
@@ -141,7 +151,7 @@ static u32 frequency_enabled_mask(void)
static bool pmu_needs_timer(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
u32 enable;
/*
@@ -213,19 +223,19 @@ static u64 get_rc6(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
+ intel_wakeref_t wakeref;
unsigned long flags;
- bool awake = false;
u64 val;
- if (intel_gt_pm_get_if_awake(gt)) {
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (wakeref) {
val = __get_rc6(gt);
- intel_gt_pm_put_async(gt);
- awake = true;
+ intel_gt_pm_put_async(gt, wakeref);
}
spin_lock_irqsave(&pmu->lock, flags);
- if (awake) {
+ if (wakeref) {
store_sample(pmu, gt_id, __I915_SAMPLE_RC6, val);
} else {
/*
@@ -251,7 +261,7 @@ static u64 get_rc6(struct intel_gt *gt)
static void init_rc6(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
struct intel_gt *gt;
unsigned int i;
@@ -429,12 +439,14 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
const unsigned int gt_id = gt->info.id;
struct i915_pmu *pmu = &i915->pmu;
struct intel_rps *rps = &gt->rps;
+ intel_wakeref_t wakeref;
if (!frequency_sampling_enabled(pmu, gt_id))
return;
/* Report 0/0 (actual/requested) frequency while parked. */
- if (!intel_gt_pm_get_if_awake(gt))
+ wakeref = intel_gt_pm_get_if_awake(gt);
+ if (!wakeref)
return;
if (pmu->enable & config_mask(__I915_PMU_ACTUAL_FREQUENCY(gt_id))) {
@@ -463,14 +475,13 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
period_ns / 1000);
}
- intel_gt_pm_put_async(gt);
+ intel_gt_pm_put_async(gt, wakeref);
}
static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
{
- struct drm_i915_private *i915 =
- container_of(hrtimer, struct drm_i915_private, pmu.timer);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = container_of(hrtimer, struct i915_pmu, timer);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
unsigned int period_ns;
struct intel_gt *gt;
unsigned int i;
@@ -505,8 +516,8 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
static void i915_pmu_event_destroy(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
drm_WARN_ON(&i915->drm, event->parent);
@@ -572,8 +583,8 @@ config_status(struct drm_i915_private *i915, u64 config)
static int engine_event_init(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
struct intel_engine_cs *engine;
engine = intel_engine_lookup_user(i915, engine_event_class(event),
@@ -586,9 +597,8 @@ static int engine_event_init(struct perf_event *event)
static int i915_pmu_event_init(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
int ret;
if (pmu->closed)
@@ -628,9 +638,8 @@ static int i915_pmu_event_init(struct perf_event *event)
static u64 __i915_pmu_event_read(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
u64 val = 0;
if (is_engine_event(event)) {
@@ -686,10 +695,8 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
static void i915_pmu_event_read(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
struct hw_perf_event *hwc = &event->hw;
- struct i915_pmu *pmu = &i915->pmu;
u64 prev, new;
if (pmu->closed) {
@@ -707,10 +714,9 @@ static void i915_pmu_event_read(struct perf_event *event)
static void i915_pmu_enable(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
const unsigned int bit = event_bit(event);
- struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
if (bit == -1)
@@ -771,10 +777,9 @@ update:
static void i915_pmu_disable(struct perf_event *event)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
+ struct i915_pmu *pmu = event_to_pmu(event);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
const unsigned int bit = event_bit(event);
- struct i915_pmu *pmu = &i915->pmu;
unsigned long flags;
if (bit == -1)
@@ -818,9 +823,7 @@ static void i915_pmu_disable(struct perf_event *event)
static void i915_pmu_event_start(struct perf_event *event, int flags)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
if (pmu->closed)
return;
@@ -848,9 +851,7 @@ out:
static int i915_pmu_event_add(struct perf_event *event, int flags)
{
- struct drm_i915_private *i915 =
- container_of(event->pmu, typeof(*i915), pmu.base);
- struct i915_pmu *pmu = &i915->pmu;
+ struct i915_pmu *pmu = event_to_pmu(event);
if (pmu->closed)
return -ENODEV;
@@ -982,7 +983,7 @@ add_pmu_attr(struct perf_pmu_events_attr *attr, const char *name,
static struct attribute **
create_event_attributes(struct i915_pmu *pmu)
{
- struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu);
+ struct drm_i915_private *i915 = pmu_to_i915(pmu);
static const struct {
unsigned int counter;
const char *name;
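The PMU cleanup above replaces repeated open-coded container_of() chains with two helpers, event_to_pmu() and pmu_to_i915(). The underlying idiom, recovering the enclosing structure from a pointer to one of its embedded members, can be shown standalone; the struct names below are illustrative and container_of is redefined locally because this is userspace code, not the kernel macro.

/*
 * Standalone container_of sketch: given a pointer to an embedded member,
 * recover the enclosing structure, as event_to_pmu()/pmu_to_i915() do.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pmu_base { int dummy; };

struct demo_pmu {
	int id;
	struct pmu_base base;	/* embedded member handed out to callers */
};

static struct demo_pmu *base_to_pmu(struct pmu_base *base)
{
	return container_of(base, struct demo_pmu, base);
}

int main(void)
{
	struct demo_pmu pmu = { .id = 42 };
	struct pmu_base *handle = &pmu.base;

	printf("recovered id = %d\n", base_to_pmu(handle)->id);
	return 0;
}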
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index b89cf2041f..6f1c4e2a99 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -195,8 +195,6 @@
#define DPIO_SFR_BYPASS (1 << 1)
#define DPIO_CMNRST (1 << 0)
-#define DPIO_PHY(pipe) ((pipe) >> 1)
-
/*
* Per pipe/PLL DPIO regs
*/
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index e88bb4f043..613decd477 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -155,81 +155,6 @@ static const struct bin_attribute dpf_attrs_1 = {
.private = (void *)1
};
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
-
- struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
- struct i915_gpu_coredump *gpu;
- ssize_t ret = 0;
-
- /*
- * FIXME: Concurrent clients triggering resets and reading + clearing
- * dumps can cause inconsistent sysfs reads when a user calls in with a
- * non-zero offset to complete a prior partial read but the
- * gpu_coredump has been cleared or replaced.
- */
-
- gpu = i915_first_error_state(i915);
- if (IS_ERR(gpu)) {
- ret = PTR_ERR(gpu);
- } else if (gpu) {
- ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
- i915_gpu_coredump_put(gpu);
- } else {
- const char *str = "No error state collected\n";
- size_t len = strlen(str);
-
- if (off < len) {
- ret = min_t(size_t, count, len - off);
- memcpy(buf, str + off, ret);
- }
- }
-
- return ret;
-}
-
-static ssize_t error_state_write(struct file *file, struct kobject *kobj,
- struct bin_attribute *attr, char *buf,
- loff_t off, size_t count)
-{
- struct device *kdev = kobj_to_dev(kobj);
- struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
-
- drm_dbg(&dev_priv->drm, "Resetting error state\n");
- i915_reset_error_state(dev_priv);
-
- return count;
-}
-
-static const struct bin_attribute error_state_attr = {
- .attr.name = "error",
- .attr.mode = S_IRUSR | S_IWUSR,
- .size = 0,
- .read = error_state_read,
- .write = error_state_write,
-};
-
-static void i915_setup_error_capture(struct device *kdev)
-{
- if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
- drm_err(&kdev_minor_to_i915(kdev)->drm,
- "error_state sysfs setup failed\n");
-}
-
-static void i915_teardown_error_capture(struct device *kdev)
-{
- sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
-}
-#else
-static void i915_setup_error_capture(struct device *kdev) {}
-static void i915_teardown_error_capture(struct device *kdev) {}
-#endif
-
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
@@ -255,7 +180,7 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
drm_warn(&dev_priv->drm,
"failed to register GT sysfs directory\n");
- i915_setup_error_capture(kdev);
+ i915_gpu_error_sysfs_setup(dev_priv);
intel_engines_add_sysfs(dev_priv);
}
@@ -264,7 +189,7 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
struct device *kdev = dev_priv->drm.primary->kdev;
- i915_teardown_error_capture(kdev);
+ i915_gpu_error_sysfs_teardown(dev_priv);
device_remove_bin_file(kdev, &dpf_attrs_1);
device_remove_bin_file(kdev, &dpf_attrs);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index c61066498b..f98577967b 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -40,7 +40,7 @@
struct drm_i915_private;
struct timer_list;
-#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+#define FDO_BUG_URL "https://drm.pages.freedesktop.org/intel-docs/how-to-file-i915-bugs.html"
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index d09aad34ba..b70715b141 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -34,6 +34,7 @@
#include "gt/intel_engine.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_tlb.h"
@@ -103,12 +104,42 @@ static inline struct i915_vma *active_to_vma(struct i915_active *ref)
static int __i915_vma_active(struct i915_active *ref)
{
- return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT;
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_tryget(vma))
+ return -ENOENT;
+
+ /*
+ * Exclude global GTT VMA from holding a GT wakeref
+ * while active, otherwise GPU never goes idle.
+ */
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we and our _retire() counterpart can be
+ * called asynchronously, storing a wakeref tracking
+ * handle inside struct i915_vma is not safe, and
+ * there is no other good place for that. Hence,
+ * use untracked variants of intel_gt_pm_get/put().
+ */
+ intel_gt_pm_get_untracked(vma->vm->gt);
+ }
+
+ return 0;
}
static void __i915_vma_retire(struct i915_active *ref)
{
- i915_vma_put(active_to_vma(ref));
+ struct i915_vma *vma = active_to_vma(ref);
+
+ if (!i915_vma_is_ggtt(vma)) {
+ /*
+ * Since we can be called from atomic contexts,
+ * use an async variant of intel_gt_pm_put().
+ */
+ intel_gt_pm_put_async_untracked(vma->vm->gt);
+ }
+
+ i915_vma_put(vma);
}
static struct i915_vma *
@@ -1404,7 +1435,7 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
struct i915_vma_work *work = NULL;
struct dma_fence *moving = NULL;
struct i915_vma_resource *vma_res = NULL;
- intel_wakeref_t wakeref = 0;
+ intel_wakeref_t wakeref;
unsigned int bound;
int err;
@@ -1424,8 +1455,14 @@ int i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww,
if (err)
return err;
- if (flags & PIN_GLOBAL)
- wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
+ /*
+ * In case of a global GTT, we must hold a runtime-pm wakeref
+ * while global PTEs are updated. In other cases, we hold
+ * the rpm reference while the VMA is active. Since runtime
+ * resume may require allocations, which are forbidden inside
+ * vm->mutex, get the first rpm wakeref outside of the mutex.
+ */
+ wakeref = intel_runtime_pm_get(&vma->vm->i915->runtime_pm);
if (flags & vma->vm->bind_async_flags) {
/* lock VM */
@@ -1561,8 +1598,7 @@ err_fence:
if (work)
dma_fence_work_commit_imm(&work->base);
err_rpm:
- if (wakeref)
- intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
+ intel_runtime_pm_put(&vma->vm->i915->runtime_pm, wakeref);
if (moving)
dma_fence_put(moving);
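The active/retire hooks above now take an untracked GT power wakeref for every non-GGTT VMA and drop it asynchronously on retire, so the GPU can only idle once all such VMAs have retired, while global GTT VMAs are exempted precisely so they never block idling. Below is a simplified standalone model of that pairing invariant; a bare counter stands in for the GT runtime-pm reference and all names are illustrative, not driver code.

/*
 * Simplified model of the active/retire wakeref pairing added above:
 * activation takes a reference unless the mapping is in the global GTT,
 * retirement drops it, and the "device" may idle only at refcount zero.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static int gt_pm_refs;	/* stand-in for the GT runtime-pm wakeref count */

struct demo_vma { bool is_ggtt; bool holds_ref; };

static void vma_active(struct demo_vma *vma)
{
	if (!vma->is_ggtt) {		/* GGTT VMAs must not block idling */
		gt_pm_refs++;
		vma->holds_ref = true;
	}
}

static void vma_retire(struct demo_vma *vma)
{
	if (vma->holds_ref) {
		gt_pm_refs--;
		vma->holds_ref = false;
	}
}

int main(void)
{
	struct demo_vma ppgtt = { .is_ggtt = false }, ggtt = { .is_ggtt = true };

	vma_active(&ppgtt);
	vma_active(&ggtt);
	printf("refs while active: %d\n", gt_pm_refs);	/* 1: only ppgtt pins */

	vma_retire(&ppgtt);
	vma_retire(&ggtt);
	assert(gt_pm_refs == 0);	/* device may idle again */
	return 0;
}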
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index e98b6d69a9..9b6d87c8b5 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -41,7 +41,7 @@
* To virtualize GPU resources GVT-g driver depends on hypervisor technology
* e.g KVM/VFIO/mdev, Xen, etc. to provide resource access trapping capability
* and be virtualized within GVT-g device module. More architectural design
- * doc is available on https://01.org/group/2230/documentation-list.
+ * doc is available on https://github.com/intel/gvt-linux/wiki.
*/
static LIST_HEAD(intel_gvt_devices);
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 3d1fdea981..60a03340bb 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -216,6 +216,22 @@ static int intel_memory_region_memtest(struct intel_memory_region *mem,
return err;
}
+static const char *region_type_str(u16 type)
+{
+ switch (type) {
+ case INTEL_MEMORY_SYSTEM:
+ return "system";
+ case INTEL_MEMORY_LOCAL:
+ return "local";
+ case INTEL_MEMORY_STOLEN_LOCAL:
+ return "stolen-local";
+ case INTEL_MEMORY_STOLEN_SYSTEM:
+ return "stolen-system";
+ default:
+ return "unknown";
+ }
+}
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
@@ -244,6 +260,9 @@ intel_memory_region_create(struct drm_i915_private *i915,
mem->type = type;
mem->instance = instance;
+ snprintf(mem->uabi_name, sizeof(mem->uabi_name), "%s%u",
+ region_type_str(type), instance);
+
mutex_init(&mem->objects.lock);
INIT_LIST_HEAD(&mem->objects.list);
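The new uabi_name is simply the lower-case type string plus the instance number ("system0", "local0", ...), which is what show_meminfo() later uses as the fdinfo region suffix. A trivial standalone check of that formatting is below; the numeric type values are placeholders, not the driver's enum.

/*
 * Standalone sketch of the uabi_name construction added above:
 * "<type string><instance>", e.g. "system0" or "local1".
 */
#include <stdio.h>

static const char *region_type_str_demo(int type)
{
	switch (type) {
	case 0: return "system";
	case 1: return "local";
	default: return "unknown";
	}
}

int main(void)
{
	char uabi_name[16];

	snprintf(uabi_name, sizeof(uabi_name), "%s%u",
		 region_type_str_demo(1), 0u);
	printf("%s\n", uabi_name);	/* -> "local0" */
	return 0;
}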
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 2953ed5c32..9ba36454e5 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -80,6 +80,7 @@ struct intel_memory_region {
u16 instance;
enum intel_region_id id;
char name[16];
+ char uabi_name[16];
bool private; /* not for userspace */
struct {
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 8743153fad..860b51b56a 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -50,184 +50,44 @@
* present for a given platform.
*/
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
-
-#include <linux/sort.h>
-
-#define STACKDEPTH 8
-
-static noinline depot_stack_handle_t __save_depot_stack(void)
+static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm)
{
- unsigned long entries[STACKDEPTH];
- unsigned int n;
-
- n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
- return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
+ return container_of(rpm, struct drm_i915_private, runtime_pm);
}
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
+
static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- spin_lock_init(&rpm->debug.lock);
- stack_depot_init();
+ ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev));
}
-static noinline depot_stack_handle_t
+static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
- depot_stack_handle_t stack, *stacks;
- unsigned long flags;
-
- if (rpm->no_wakeref_tracking)
+ if (!rpm->available || rpm->no_wakeref_tracking)
return -1;
- stack = __save_depot_stack();
- if (!stack)
- return -1;
-
- spin_lock_irqsave(&rpm->debug.lock, flags);
-
- if (!rpm->debug.count)
- rpm->debug.last_acquire = stack;
-
- stacks = krealloc(rpm->debug.owners,
- (rpm->debug.count + 1) * sizeof(*stacks),
- GFP_NOWAIT | __GFP_NOWARN);
- if (stacks) {
- stacks[rpm->debug.count++] = stack;
- rpm->debug.owners = stacks;
- } else {
- stack = -1;
- }
-
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- return stack;
+ return intel_ref_tracker_alloc(&rpm->debug);
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
- depot_stack_handle_t stack)
+ intel_wakeref_t wakeref)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
- unsigned long flags, n;
- bool found = false;
-
- if (unlikely(stack == -1))
+ if (!rpm->available || rpm->no_wakeref_tracking)
return;
- spin_lock_irqsave(&rpm->debug.lock, flags);
- for (n = rpm->debug.count; n--; ) {
- if (rpm->debug.owners[n] == stack) {
- memmove(rpm->debug.owners + n,
- rpm->debug.owners + n + 1,
- (--rpm->debug.count - n) * sizeof(stack));
- found = true;
- break;
- }
- }
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- if (drm_WARN(&i915->drm, !found,
- "Unmatched wakeref (tracking %lu), count %u\n",
- rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
- if (!buf)
- return;
-
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);
-
- stack = READ_ONCE(rpm->debug.last_release);
- if (stack) {
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
- }
-
- kfree(buf);
- }
+ intel_ref_tracker_free(&rpm->debug, wakeref);
}
-static int cmphandle(const void *_a, const void *_b)
+static void untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
- const depot_stack_handle_t * const a = _a, * const b = _b;
-
- if (*a < *b)
- return -1;
- else if (*a > *b)
- return 1;
- else
- return 0;
-}
-
-static void
-__print_intel_runtime_pm_wakeref(struct drm_printer *p,
- const struct intel_runtime_pm_debug *dbg)
-{
- unsigned long i;
- char *buf;
-
- buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
- if (!buf)
- return;
-
- if (dbg->last_acquire) {
- stack_depot_snprint(dbg->last_acquire, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref last acquired:\n%s", buf);
- }
-
- if (dbg->last_release) {
- stack_depot_snprint(dbg->last_release, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref last released:\n%s", buf);
- }
-
- drm_printf(p, "Wakeref count: %lu\n", dbg->count);
-
- sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);
-
- for (i = 0; i < dbg->count; i++) {
- depot_stack_handle_t stack = dbg->owners[i];
- unsigned long rep;
-
- rep = 1;
- while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
- rep++, i++;
- stack_depot_snprint(stack, buf, PAGE_SIZE, 2);
- drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
- }
-
- kfree(buf);
-}
-
-static noinline void
-__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
- struct intel_runtime_pm_debug *saved)
-{
- *saved = *debug;
-
- debug->owners = NULL;
- debug->count = 0;
- debug->last_release = __save_depot_stack();
-}
-
-static void
-dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
-{
- if (debug->count) {
- struct drm_printer p = drm_debug_printer("i915");
-
- __print_intel_runtime_pm_wakeref(&p, debug);
- }
-
- kfree(debug->owners);
+ ref_tracker_dir_exit(&rpm->debug);
}
static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
- struct intel_runtime_pm_debug dbg = {};
unsigned long flags;
if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
@@ -235,60 +95,14 @@ __intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
flags))
return;
- __untrack_all_wakerefs(&rpm->debug, &dbg);
+ ref_tracker_dir_print_locked(&rpm->debug, INTEL_REFTRACK_PRINT_LIMIT);
spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- dump_and_free_wakeref_tracking(&dbg);
-}
-
-static noinline void
-untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
-{
- struct intel_runtime_pm_debug dbg = {};
- unsigned long flags;
-
- spin_lock_irqsave(&rpm->debug.lock, flags);
- __untrack_all_wakerefs(&rpm->debug, &dbg);
- spin_unlock_irqrestore(&rpm->debug.lock, flags);
-
- dump_and_free_wakeref_tracking(&dbg);
}
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
struct drm_printer *p)
{
- struct intel_runtime_pm_debug dbg = {};
-
- do {
- unsigned long alloc = dbg.count;
- depot_stack_handle_t *s;
-
- spin_lock_irq(&rpm->debug.lock);
- dbg.count = rpm->debug.count;
- if (dbg.count <= alloc) {
- memcpy(dbg.owners,
- rpm->debug.owners,
- dbg.count * sizeof(*s));
- }
- dbg.last_acquire = rpm->debug.last_acquire;
- dbg.last_release = rpm->debug.last_release;
- spin_unlock_irq(&rpm->debug.lock);
- if (dbg.count <= alloc)
- break;
-
- s = krealloc(dbg.owners,
- dbg.count * sizeof(*s),
- GFP_NOWAIT | __GFP_NOWARN);
- if (!s)
- goto out;
-
- dbg.owners = s;
- } while (1);
-
- __print_intel_runtime_pm_wakeref(p, &dbg);
-
-out:
- kfree(dbg.owners);
+ intel_ref_tracker_show(&rpm->debug, p);
}
#else
@@ -297,14 +111,14 @@ static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}
-static depot_stack_handle_t
+static intel_wakeref_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
return -1;
}
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
- intel_wakeref_t wref)
+ intel_wakeref_t wakeref)
{
}
@@ -349,9 +163,7 @@ intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
bool wakelock)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
int ret;
ret = pm_runtime_get_sync(rpm->kdev);
@@ -556,9 +368,7 @@ void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
*/
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
struct device *kdev = rpm->kdev;
/*
@@ -611,9 +421,7 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
struct device *kdev = rpm->kdev;
/* Transfer rpm ownership back to core */
@@ -628,9 +436,7 @@ void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 = container_of(rpm,
- struct drm_i915_private,
- runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
int count = atomic_read(&rpm->wakeref_count);
intel_wakeref_auto_fini(&rpm->userfault_wakeref);
@@ -639,14 +445,17 @@ void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
"i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
intel_rpm_raw_wakeref_count(count),
intel_rpm_wakelock_count(count));
+}
+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm)
+{
+ intel_runtime_pm_driver_release(rpm);
untrack_all_intel_runtime_pm_wakerefs(rpm);
}
void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
- struct drm_i915_private *i915 =
- container_of(rpm, struct drm_i915_private, runtime_pm);
+ struct drm_i915_private *i915 = rpm_to_i915(rpm);
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct device *kdev = &pdev->dev;
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index f79cda7a25..de3579d399 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -11,8 +11,6 @@
#include "intel_wakeref.h"
-#include "i915_utils.h"
-
struct device;
struct drm_i915_private;
struct drm_printer;
@@ -77,15 +75,7 @@ struct intel_runtime_pm {
* paired rpm_put) we can remove corresponding pairs of and keep
* the array trimmed to active wakerefs.
*/
- struct intel_runtime_pm_debug {
- spinlock_t lock;
-
- depot_stack_handle_t last_acquire;
- depot_stack_handle_t last_release;
-
- depot_stack_handle_t *owners;
- unsigned long count;
- } debug;
+ struct ref_tracker_dir debug;
#endif
};
@@ -189,6 +179,7 @@ void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm);
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_disable(struct intel_runtime_pm *rpm);
void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm);
+void intel_runtime_pm_driver_last_release(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm);
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm);
diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c
index 623a690893..dea2f63184 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.c
+++ b/drivers/gpu/drm/i915/intel_wakeref.c
@@ -99,7 +99,8 @@ static void __intel_wakeref_put_work(struct work_struct *wrk)
void __intel_wakeref_init(struct intel_wakeref *wf,
struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
- struct intel_wakeref_lockclass *key)
+ struct intel_wakeref_lockclass *key,
+ const char *name)
{
wf->i915 = i915;
wf->ops = ops;
@@ -111,6 +112,10 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
INIT_DELAYED_WORK(&wf->work, __intel_wakeref_put_work);
lockdep_init_map(&wf->work.work.lockdep_map,
"wakeref.work", &key->work, 0);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+ ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name);
+#endif
}
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
@@ -191,3 +196,31 @@ void intel_wakeref_auto_fini(struct intel_wakeref_auto *wf)
intel_wakeref_auto(wf, 0);
INTEL_WAKEREF_BUG_ON(wf->wakeref);
}
+
+void intel_ref_tracker_show(struct ref_tracker_dir *dir,
+ struct drm_printer *p)
+{
+ const size_t buf_size = PAGE_SIZE;
+ char *buf, *sb, *se;
+ size_t count;
+
+ buf = kmalloc(buf_size, GFP_NOWAIT);
+ if (!buf)
+ return;
+
+ count = ref_tracker_dir_snprint(dir, buf, buf_size);
+ if (!count)
+ goto free;
+ /* printk does not like big buffers, so we split it */
+ for (sb = buf; *sb; sb = se + 1) {
+ se = strchrnul(sb, '\n');
+ drm_printf(p, "%.*s", (int)(se - sb + 1), sb);
+ if (!*se)
+ break;
+ }
+ if (count >= buf_size)
+ drm_printf(p, "\n...dropped %zd extra bytes of leak report.\n",
+ count + 1 - buf_size);
+free:
+ kfree(buf);
+}
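intel_ref_tracker_show() above prints the ref_tracker report one line at a time because printk handles very large buffers poorly. The splitting loop can be reproduced standalone as below; strchrnul() is a GNU extension, so the sketch defines _GNU_SOURCE before the includes.

/*
 * Standalone version of the line-splitting loop in intel_ref_tracker_show():
 * walk a text buffer and emit it one '\n'-terminated chunk at a time.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

static void print_by_line(const char *buf)
{
	const char *sb, *se;

	for (sb = buf; *sb; sb = se + 1) {
		se = strchrnul(sb, '\n');
		printf("%.*s", (int)(se - sb + 1), sb);
		if (!*se)	/* last chunk had no trailing newline */
			break;
	}
}

int main(void)
{
	print_by_line("leaked wakeref A\nleaked wakeref B\n(no newline at end)");
	putchar('\n');
	return 0;
}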
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index ec881b0973..68aa3be482 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -7,16 +7,25 @@
#ifndef INTEL_WAKEREF_H
#define INTEL_WAKEREF_H
+#include <drm/drm_print.h>
+
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
+#include <linux/ref_tracker.h>
+#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
+typedef unsigned long intel_wakeref_t;
+
+#define INTEL_REFTRACK_DEAD_COUNT 16
+#define INTEL_REFTRACK_PRINT_LIMIT 16
+
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#else
@@ -26,8 +35,6 @@
struct intel_runtime_pm;
struct intel_wakeref;
-typedef depot_stack_handle_t intel_wakeref_t;
-
struct intel_wakeref_ops {
int (*get)(struct intel_wakeref *wf);
int (*put)(struct intel_wakeref *wf);
@@ -43,6 +50,10 @@ struct intel_wakeref {
const struct intel_wakeref_ops *ops;
struct delayed_work work;
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+ struct ref_tracker_dir debug;
+#endif
};
struct intel_wakeref_lockclass {
@@ -53,11 +64,12 @@ struct intel_wakeref_lockclass {
void __intel_wakeref_init(struct intel_wakeref *wf,
struct drm_i915_private *i915,
const struct intel_wakeref_ops *ops,
- struct intel_wakeref_lockclass *key);
-#define intel_wakeref_init(wf, i915, ops) do { \
+ struct intel_wakeref_lockclass *key,
+ const char *name);
+#define intel_wakeref_init(wf, i915, ops, name) do { \
static struct intel_wakeref_lockclass __key; \
\
- __intel_wakeref_init((wf), (i915), (ops), &__key); \
+ __intel_wakeref_init((wf), (i915), (ops), &__key, name); \
} while (0)
int __intel_wakeref_get_first(struct intel_wakeref *wf);
@@ -261,6 +273,57 @@ __intel_wakeref_defer_park(struct intel_wakeref *wf)
*/
int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
+#define INTEL_WAKEREF_DEF ((intel_wakeref_t)(-1))
+
+static inline intel_wakeref_t intel_ref_tracker_alloc(struct ref_tracker_dir *dir)
+{
+ struct ref_tracker *user = NULL;
+
+ ref_tracker_alloc(dir, &user, GFP_NOWAIT);
+
+ return (intel_wakeref_t)user ?: INTEL_WAKEREF_DEF;
+}
+
+static inline void intel_ref_tracker_free(struct ref_tracker_dir *dir,
+ intel_wakeref_t handle)
+{
+ struct ref_tracker *user;
+
+ user = (handle == INTEL_WAKEREF_DEF) ? NULL : (void *)handle;
+
+ ref_tracker_free(dir, &user);
+}
+
+void intel_ref_tracker_show(struct ref_tracker_dir *dir,
+ struct drm_printer *p);
+
+#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF)
+
+static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
+{
+ return intel_ref_tracker_alloc(&wf->debug);
+}
+
+static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
+ intel_wakeref_t handle)
+{
+ intel_ref_tracker_free(&wf->debug, handle);
+}
+
+#else
+
+static inline intel_wakeref_t intel_wakeref_track(struct intel_wakeref *wf)
+{
+ return -1;
+}
+
+static inline void intel_wakeref_untrack(struct intel_wakeref *wf,
+ intel_wakeref_t handle)
+{
+}
+
+#endif
+
struct intel_wakeref_auto {
struct drm_i915_private *i915;
struct timer_list timer;
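With intel_wakeref_t now an unsigned long carrying a ref_tracker handle (or -1 when tracking is compiled out or allocation fails), the handle returned by intel_wakeref_track() is what a caller later passes back to intel_wakeref_untrack(), so a leaked reference can be attributed to its acquiring call site. A minimal sketch of that pairing, with hypothetical wrapper names that are not part of the patch:

/* Hypothetical wrappers showing the intended track/untrack pairing;
 * the real acquire/release paths stay in intel_wakeref.h/.c. */
static inline intel_wakeref_t example_acquire(struct intel_wakeref *wf)
{
	intel_wakeref_get(wf);			/* existing acquire path */
	return intel_wakeref_track(wf);		/* handle for this reference */
}

static inline void example_release(struct intel_wakeref *wf, intel_wakeref_t handle)
{
	intel_wakeref_untrack(wf, handle);	/* drop the tracker entry first */
	intel_wakeref_put(wf);			/* then the reference itself */
}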
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index dc327cf40b..75278e78ca 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -199,6 +199,9 @@ int intel_pxp_init(struct drm_i915_private *i915)
struct intel_gt *gt;
bool is_full_feature = false;
+ if (intel_gt_is_wedged(to_gt(i915)))
+ return -ENOTCONN;
+
/*
* NOTE: Get the ctrl_gt before checking intel_pxp_is_supported since
* we still need it if PXP's backend tee transport is needed.
@@ -303,6 +306,8 @@ static int __pxp_global_teardown_final(struct intel_pxp *pxp)
if (!pxp->arb_is_valid)
return 0;
+
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for suspend/fini");
/*
* To ensure synchronous and coherent session teardown completion
* in response to suspend or shutdown triggers, don't use a worker.
@@ -324,6 +329,8 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
if (pxp->arb_is_valid)
return 0;
+
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: teardown for restart");
/*
* The arb-session is currently inactive and we are doing a reset and restart
* due to a runtime event. Use the worker that was designed for this.
@@ -332,8 +339,11 @@ static int __pxp_global_teardown_restart(struct intel_pxp *pxp)
timeout = intel_pxp_get_backend_timeout_ms(pxp);
- if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout)))
+ if (!wait_for_completion_timeout(&pxp->termination, msecs_to_jiffies(timeout))) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: restart backend timed out (%d ms)",
+ timeout);
return -ETIMEDOUT;
+ }
return 0;
}
@@ -414,10 +424,12 @@ int intel_pxp_start(struct intel_pxp *pxp)
int ret = 0;
ret = intel_pxp_get_readiness_status(pxp, PXP_READINESS_TIMEOUT);
- if (ret < 0)
+ if (ret < 0) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: tried but not-avail (%d)", ret);
return ret;
- else if (ret > 1)
+ } else if (ret > 1) {
return -EIO; /* per UAPI spec, user may retry later */
+ }
mutex_lock(&pxp->arb_mutex);
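The intel_pxp_start() hunk above distinguishes three readiness outcomes: a negative status propagates as-is, a status above 1 is reported as a retryable -EIO, and anything else falls through to arb-session setup. A compact, hedged sketch of that decision (the helper name and the ready parameter are illustrative only):

/* Illustration only: mirrors the tri-state handling in intel_pxp_start(). */
static int example_check_readiness(struct drm_i915_private *i915, int ready)
{
	if (ready < 0) {
		drm_dbg(&i915->drm, "PXP: tried but not-avail (%d)", ready);
		return ready;
	} else if (ready > 1) {
		return -EIO;	/* caller may retry later */
	}
	return 0;
}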
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index 91e9622c07..d81750b9bd 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -40,11 +40,12 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) {
/* immediately mark PXP as inactive on termination */
intel_pxp_mark_termination_in_progress(pxp);
- pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED;
+ pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED |
+ PXP_EVENT_TYPE_IRQ;
}
if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
- pxp->session_events |= PXP_TERMINATION_COMPLETE;
+ pxp->session_events |= PXP_TERMINATION_COMPLETE | PXP_EVENT_TYPE_IRQ;
if (pxp->session_events)
queue_work(system_unbound_wq, &pxp->session_work);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
index 0a3e66b026..091c86e03d 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -137,8 +137,10 @@ void intel_pxp_terminate(struct intel_pxp *pxp, bool post_invalidation_needs_res
static void pxp_terminate_complete(struct intel_pxp *pxp)
{
/* Re-create the arb session after teardown handle complete */
- if (fetch_and_zero(&pxp->hw_state_invalidated))
+ if (fetch_and_zero(&pxp->hw_state_invalidated)) {
+ drm_dbg(&pxp->ctrl_gt->i915->drm, "PXP: creating arb_session after invalidation");
pxp_create_arb_session(pxp);
+ }
complete_all(&pxp->termination);
}
@@ -157,6 +159,8 @@ static void pxp_session_work(struct work_struct *work)
if (!events)
return;
+ drm_dbg(&gt->i915->drm, "PXP: processing event-flags 0x%08x", events);
+
if (events & PXP_INVAL_REQUIRED)
intel_pxp_invalidate(pxp);
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
index 7e11fa8034..07864b584c 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -124,6 +124,7 @@ struct intel_pxp {
#define PXP_TERMINATION_REQUEST BIT(0)
#define PXP_TERMINATION_COMPLETE BIT(1)
#define PXP_INVAL_REQUIRED BIT(2)
+#define PXP_EVENT_TYPE_IRQ BIT(3)
};
#endif /* __INTEL_PXP_TYPES_H__ */
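PXP_EVENT_TYPE_IRQ is OR'd into session_events by the irq handler so the session worker can tell irq-driven terminations apart from other triggers. As a hedged sketch only (the real consumer logic lives in intel_pxp_session.c and is not reproduced here), a worker can separate the origin marker from the action bits like this:

/* Bit-mask illustration for the new flag; everything beyond the PXP_*
 * defines above is hypothetical. */
static void example_handle_events(struct intel_pxp *pxp, u32 events)
{
	bool from_irq = events & PXP_EVENT_TYPE_IRQ;

	if (events & PXP_INVAL_REQUIRED)
		intel_pxp_invalidate(pxp);	/* as in pxp_session_work() */

	drm_dbg(&pxp->ctrl_gt->i915->drm,
		"PXP: events 0x%08x (irq-driven: %s)",
		events, from_irq ? "yes" : "no");
}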
diff --git a/drivers/gpu/drm/i915/selftests/i915_syncmap.c b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
index 47f4ae18a1..88fa845e9f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_syncmap.c
+++ b/drivers/gpu/drm/i915/selftests/i915_syncmap.c
@@ -77,7 +77,7 @@ __sync_print(struct i915_syncmap *p,
for_each_set_bit(i, (unsigned long *)&p->bitmap, KSYNCMAP) {
buf = __sync_print(__sync_child(p)[i], buf, sz,
depth + 1,
- last << 1 | !!(p->bitmap >> (i + 1)),
+ last << 1 | ((p->bitmap >> (i + 1)) ? 1 : 0),
i);
}
}
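The selftest tweak above is behaviour-preserving: for any shifted bitmap value, !!(x) and (x ? 1 : 0) collapse to the same 0/1 bit. A standalone userspace check (illustration only, not driver code) confirms the equivalence:

/* Userspace illustration that the two spellings agree for a 64-bit bitmap. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t bitmap = 0x8000000000000001ULL;
	unsigned int i;

	for (i = 0; i < 63; i++) {	/* keep the shift amount below 64 */
		uint64_t shifted = bitmap >> (i + 1);
		assert(!!shifted == (shifted ? 1 : 0));
	}
	return 0;
}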
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
index 2990dd4d4a..e14ac0ab13 100644
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
@@ -3,6 +3,8 @@
* Copyright © 2021 Intel Corporation
*/
+#include <linux/jiffies.h>
+
//#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
#include "i915_drv.h"
@@ -12,7 +14,7 @@
#define REDUCED_TIMESLICE 5
#define REDUCED_PREEMPT 10
-#define WAIT_FOR_RESET_TIME 10000
+#define WAIT_FOR_RESET_TIME_MS 10000
struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
{
@@ -91,7 +93,7 @@ int intel_selftest_wait_for_rq(struct i915_request *rq)
{
long ret;
- ret = i915_request_wait(rq, 0, WAIT_FOR_RESET_TIME);
+ ret = i915_request_wait(rq, 0, msecs_to_jiffies(WAIT_FOR_RESET_TIME_MS));
if (ret < 0)
return ret;
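The rename to WAIT_FOR_RESET_TIME_MS plus the msecs_to_jiffies() conversion makes the timeout unit explicit, since i915_request_wait() expects a jiffies-based timeout. A minimal sketch of the pattern (the macro and helper names here are hypothetical):

/* Keep timeouts in milliseconds at the definition site and convert to
 * jiffies only at the wait call. */
#define EXAMPLE_TIMEOUT_MS 10000

static long example_wait(struct i915_request *rq)
{
	return i915_request_wait(rq, 0, msecs_to_jiffies(EXAMPLE_TIMEOUT_MS));
}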
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 03ea75cd84..4f98aa8a86 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -24,6 +24,8 @@
#include "../i915_selftest.h"
+#include "gt/intel_gt.h"
+
static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
unsigned int num_ranges,
bool is_watertight)
diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c
index f32e9f7877..40874ebfb6 100644
--- a/drivers/gpu/drm/i915/soc/intel_gmch.c
+++ b/drivers/gpu/drm/i915/soc/intel_gmch.c
@@ -33,18 +33,22 @@ int intel_gmch_bridge_setup(struct drm_i915_private *i915)
i915->gmch.pdev);
}
+static int mchbar_reg(struct drm_i915_private *i915)
+{
+ return GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
+}
+
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_i915_private *i915)
{
- int reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp_lo, temp_hi = 0;
u64 mchbar_addr;
int ret;
if (GRAPHICS_VER(i915) >= 4)
- pci_read_config_dword(i915->gmch.pdev, reg + 4, &temp_hi);
- pci_read_config_dword(i915->gmch.pdev, reg, &temp_lo);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4, &temp_hi);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp_lo);
mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
/* If ACPI doesn't have it, assume we need to allocate it ourselves */
@@ -68,10 +72,10 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915)
}
if (GRAPHICS_VER(i915) >= 4)
- pci_write_config_dword(i915->gmch.pdev, reg + 4,
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915) + 4,
upper_32_bits(i915->gmch.mch_res.start));
- pci_write_config_dword(i915->gmch.pdev, reg,
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915),
lower_32_bits(i915->gmch.mch_res.start));
return 0;
}
@@ -79,7 +83,6 @@ intel_alloc_mchbar_resource(struct drm_i915_private *i915)
/* Setup MCHBAR if possible, return true if we should disable it again */
void intel_gmch_bar_setup(struct drm_i915_private *i915)
{
- int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;
bool enabled;
@@ -92,7 +95,7 @@ void intel_gmch_bar_setup(struct drm_i915_private *i915)
pci_read_config_dword(i915->gmch.pdev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
- pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp);
enabled = temp & 1;
}
@@ -110,15 +113,13 @@ void intel_gmch_bar_setup(struct drm_i915_private *i915)
pci_write_config_dword(i915->gmch.pdev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
- pci_read_config_dword(i915->gmch.pdev, mchbar_reg, &temp);
- pci_write_config_dword(i915->gmch.pdev, mchbar_reg, temp | 1);
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp);
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915), temp | 1);
}
}
void intel_gmch_bar_teardown(struct drm_i915_private *i915)
{
- int mchbar_reg = GRAPHICS_VER(i915) >= 4 ? MCHBAR_I965 : MCHBAR_I915;
-
if (i915->gmch.mchbar_need_disable) {
if (IS_I915G(i915) || IS_I915GM(i915)) {
u32 deven_val;
@@ -131,10 +132,10 @@ void intel_gmch_bar_teardown(struct drm_i915_private *i915)
} else {
u32 mchbar_val;
- pci_read_config_dword(i915->gmch.pdev, mchbar_reg,
+ pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915),
&mchbar_val);
mchbar_val &= ~1;
- pci_write_config_dword(i915->gmch.pdev, mchbar_reg,
+ pci_write_config_dword(i915->gmch.pdev, mchbar_reg(i915),
mchbar_val);
}
}
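Factoring the GRAPHICS_VER() ternary into mchbar_reg() removes the duplicated local computation from the setup, allocation, and teardown paths. For illustration, a hypothetical caller would use the helper the same way the enable check above does:

/* Illustration of the helper in use (mirrors the enable check in
 * intel_gmch_bar_setup()); example_mchbar_enabled() itself is hypothetical. */
static bool example_mchbar_enabled(struct drm_i915_private *i915)
{
	u32 temp;

	pci_read_config_dword(i915->gmch.pdev, mchbar_reg(i915), &temp);
	return temp & 1;
}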
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
index b98dec3ad8..ffa195560d 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.c
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -166,23 +166,6 @@ u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
return val;
}
-u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
- SB_CRWRDA_NP, reg, &val);
-}
-
u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
{
u32 val = 0;
@@ -227,9 +210,9 @@ static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy p
return IOSF_PORT_DPIO;
}
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg)
{
- u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+ u32 port = vlv_dpio_phy_iosf_port(i915, phy);
u32 val = 0;
vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
@@ -239,16 +222,16 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
* so ideally we should check the register offset instead...
*/
drm_WARN(&i915->drm, val == 0xffffffff,
- "DPIO read pipe %c reg 0x%x == 0x%x\n",
- pipe_name(pipe), reg, val);
+ "DPIO PHY%d read reg 0x%x == 0x%x\n",
+ phy, reg, val);
return val;
}
void vlv_dpio_write(struct drm_i915_private *i915,
- enum pipe pipe, int reg, u32 val)
+ enum dpio_phy phy, int reg, u32 val)
{
- u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+ u32 port = vlv_dpio_phy_iosf_port(i915, phy);
vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
}
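With vlv_dpio_read()/vlv_dpio_write() now taking an enum dpio_phy, callers that previously passed a pipe resolve the PHY themselves, for example via DPIO_PHY(pipe), or pass the phy they already track. A hedged call-site sketch (the wrapper is hypothetical):

/* Hypothetical caller: the pipe-to-PHY mapping moves out of the sideband
 * helpers and into the call site. */
static u32 example_read_dpio(struct drm_i915_private *i915, enum pipe pipe, int reg)
{
	return vlv_dpio_read(i915, DPIO_PHY(pipe), reg);
}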
diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
index 9ce283d96b..c20cf41b2d 100644
--- a/drivers/gpu/drm/i915/vlv_sideband.h
+++ b/drivers/gpu/drm/i915/vlv_sideband.h
@@ -11,7 +11,7 @@
#include "vlv_sideband_reg.h"
-enum pipe;
+enum dpio_phy;
struct drm_i915_private;
enum {
@@ -26,9 +26,6 @@ enum {
};
void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports);
-u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg);
-void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val);
void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports);
static inline void vlv_bunit_get(struct drm_i915_private *i915)
@@ -75,9 +72,9 @@ static inline void vlv_dpio_get(struct drm_i915_private *i915)
vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_DPIO));
}
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg);
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum dpio_phy phy, int reg);
void vlv_dpio_write(struct drm_i915_private *i915,
- enum pipe pipe, int reg, u32 val);
+ enum dpio_phy phy, int reg, u32 val);
static inline void vlv_dpio_put(struct drm_i915_private *i915)
{