author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
commit     b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree       42f079ff82e701ebcb76829974b4caca3e5b6798 /drivers/gpu/drm/i915/display/skl_watermark.c
parent     Adding upstream version 6.8.12. (diff)
download   linux-b20732900e4636a467c0183a47f7396700f5f743.tar.xz
           linux-b20732900e4636a467c0183a47f7396700f5f743.zip
Adding upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/i915/display/skl_watermark.c')
-rw-r--r--  drivers/gpu/drm/i915/display/skl_watermark.c  108
1 file changed, 82 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index 56588d6e24..c6b9be80d8 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -23,6 +23,12 @@
 #include "skl_watermark.h"
 #include "skl_watermark_regs.h"
 
+/*It is expected that DSB can do posted writes to every register in
+ * the pipe and planes within 100us. For flip queue use case, the
+ * recommended DSB execution time is 100us + one SAGV block time.
+ */
+#define DSB_EXE_TIME 100
+
 static void skl_sagv_disable(struct drm_i915_private *i915);
 
 /* Stores plane specific WM parameters */
@@ -443,12 +449,35 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
 
 	for_each_new_intel_crtc_in_state(state, crtc,
 					 new_crtc_state, i) {
+		struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
+
 		new_bw_state = intel_atomic_get_bw_state(state);
 		if (IS_ERR(new_bw_state))
 			return PTR_ERR(new_bw_state);
 
 		old_bw_state = intel_atomic_get_old_bw_state(state);
 
+		/*
+		 * We store use_sagv_wm in the crtc state rather than relying on
+		 * that bw state since we have no convenient way to get at the
+		 * latter from the plane commit hooks (especially in the legacy
+		 * cursor case).
+		 *
+		 * drm_atomic_check_only() gets upset if we pull more crtcs
+		 * into the state, so we have to calculate this based on the
+		 * individual intel_crtc_can_enable_sagv() rather than
+		 * the overall intel_can_enable_sagv(). Otherwise the
+		 * crtcs not included in the commit would not switch to the
+		 * SAGV watermarks when we are about to enable SAGV, and that
+		 * would lead to underruns. This does mean extra power draw
+		 * when only a subset of the crtcs are blocking SAGV as the
+		 * other crtcs can't be allowed to use the more optimal
+		 * normal (ie. non-SAGV) watermarks.
+		 */
+		pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
+			DISPLAY_VER(i915) >= 12 &&
+			intel_crtc_can_enable_sagv(new_crtc_state);
+
 		if (intel_crtc_can_enable_sagv(new_crtc_state))
 			new_bw_state->pipe_sagv_reject &= ~BIT(crtc->pipe);
 		else
@@ -478,21 +507,6 @@ static int intel_compute_sagv_mask(struct intel_atomic_state *state)
 			return ret;
 	}
 
-	for_each_new_intel_crtc_in_state(state, crtc,
-					 new_crtc_state, i) {
-		struct skl_pipe_wm *pipe_wm = &new_crtc_state->wm.skl.optimal;
-
-		/*
-		 * We store use_sagv_wm in the crtc state rather than relying on
-		 * that bw state since we have no convenient way to get at the
-		 * latter from the plane commit hooks (especially in the legacy
-		 * cursor case)
-		 */
-		pipe_wm->use_sagv_wm = !HAS_HW_SAGV_WM(i915) &&
-			DISPLAY_VER(i915) >= 12 &&
-			intel_can_enable_sagv(i915, new_bw_state);
-	}
-
 	return 0;
 }
 
@@ -1367,7 +1381,7 @@ skl_total_relative_data_rate(const struct intel_crtc_state *crtc_state)
 	u64 data_rate = 0;
 
 	for_each_plane_id_on_crtc(crtc, plane_id) {
-		if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20)
+		if (plane_id == PLANE_CURSOR)
 			continue;
 
 		data_rate += crtc_state->rel_data_rate[plane_id];
@@ -1514,12 +1528,10 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
 		return 0;
 
 	/* Allocate fixed number of blocks for cursor. */
-	if (DISPLAY_VER(i915) < 20) {
-		cursor_size = skl_cursor_allocation(crtc_state, num_active);
-		iter.size -= cursor_size;
-		skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
-				   alloc->end - cursor_size, alloc->end);
-	}
+	cursor_size = skl_cursor_allocation(crtc_state, num_active);
+	iter.size -= cursor_size;
+	skl_ddb_entry_init(&crtc_state->wm.skl.plane_ddb[PLANE_CURSOR],
+			   alloc->end - cursor_size, alloc->end);
 
 	iter.data_rate = skl_total_relative_data_rate(crtc_state);
 
@@ -1533,7 +1545,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
 		const struct skl_plane_wm *wm =
 			&crtc_state->wm.skl.optimal.planes[plane_id];
 
-		if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20) {
+		if (plane_id == PLANE_CURSOR) {
 			const struct skl_ddb_entry *ddb =
 				&crtc_state->wm.skl.plane_ddb[plane_id];
 
@@ -1581,7 +1593,7 @@ skl_crtc_allocate_plane_ddb(struct intel_atomic_state *state,
 		const struct skl_plane_wm *wm =
 			&crtc_state->wm.skl.optimal.planes[plane_id];
 
-		if (plane_id == PLANE_CURSOR && DISPLAY_VER(i915) < 20)
+		if (plane_id == PLANE_CURSOR)
 			continue;
 
 		if (DISPLAY_VER(i915) < 11 &&
@@ -2898,12 +2910,51 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state,
 	return 0;
 }
 
+/*
+ * If Fixed Refresh Rate:
+ * Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from
+ * watermark level1 and up and above. If watermark level 1 is
+ * invalid program it with all 1's.
+ * Program PKG_C_LATENCY Added Wake Time = DSB execution time
+ * If Variable Refresh Rate:
+ * Program DEEP PKG_C_LATENCY Pkg C with all 1's.
+ * Program PKG_C_LATENCY Added Wake Time = 0
+ */
+static void
+skl_program_dpkgc_latency(struct drm_i915_private *i915, bool vrr_enabled)
+{
+	u32 max_latency = 0;
+	u32 clear = 0, val = 0;
+	u32 added_wake_time = 0;
+
+	if (DISPLAY_VER(i915) < 20)
+		return;
+
+	if (vrr_enabled) {
+		max_latency = LNL_PKG_C_LATENCY_MASK;
+		added_wake_time = 0;
+	} else {
+		max_latency = skl_watermark_max_latency(i915, 1);
+		if (max_latency == 0)
+			max_latency = LNL_PKG_C_LATENCY_MASK;
+		added_wake_time = DSB_EXE_TIME +
+			i915->display.sagv.block_time_us;
+	}
+
+	clear |= LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK;
+	val |= REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, max_latency);
+	val |= REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time);
+
+	intel_uncore_rmw(&i915->uncore, LNL_PKG_C_LATENCY, clear, val);
+}
+
 static int
 skl_compute_wm(struct intel_atomic_state *state)
 {
 	struct intel_crtc *crtc;
 	struct intel_crtc_state __maybe_unused *new_crtc_state;
 	int ret, i;
+	bool vrr_enabled = false;
 
 	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
 		ret = skl_build_pipe_wm(state, crtc);
@@ -2928,8 +2979,13 @@ skl_compute_wm(struct intel_atomic_state *state)
 		ret = skl_wm_add_affected_planes(state, crtc);
 		if (ret)
 			return ret;
+
+		if (new_crtc_state->vrr.enable)
+			vrr_enabled = true;
 	}
 
+	skl_program_dpkgc_latency(to_i915(state->base.dev), vrr_enabled);
+
 	skl_print_wm_changes(state);
 
 	return 0;
@@ -3725,11 +3781,11 @@ void skl_watermark_debugfs_register(struct drm_i915_private *i915)
 				    &intel_sagv_status_fops);
 }
 
-unsigned int skl_watermark_max_latency(struct drm_i915_private *i915)
+unsigned int skl_watermark_max_latency(struct drm_i915_private *i915, int initial_wm_level)
 {
 	int level;
 
-	for (level = i915->display.wm.num_levels - 1; level >= 0; level--) {
+	for (level = i915->display.wm.num_levels - 1; level >= initial_wm_level; level--) {
 		unsigned int latency = skl_wm_latency(i915, level, NULL);
 
 		if (latency)
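
The core of the new LNL-only path above is skl_program_dpkgc_latency(): with a fixed refresh rate it programs the deep package-C latency field with the highest valid latency from watermark level 1 upward (all 1's if no level above 0 is valid) and an added wake time of DSB_EXE_TIME plus the SAGV block time, while with VRR it writes all 1's and zero wake time. The standalone C sketch below mirrors that decision and the field packing only as an illustration; the bit layout, helper name and inputs are stand-ins I chose for the example, not the i915 definitions (the real masks live in skl_watermark_regs.h as LNL_PKG_C_LATENCY_MASK / LNL_ADDED_WAKE_TIME_MASK and are written via REG_FIELD_PREP() and intel_uncore_rmw()).

#include <stdint.h>
#include <stdio.h>

#define DSB_EXE_TIME            100            /* us, as defined by the patch */
/* Illustrative field layout only; see skl_watermark_regs.h for the real masks. */
#define PKG_C_LATENCY_MASK      0x0000ffffu
#define ADDED_WAKE_TIME_MASK    0xffff0000u
#define ADDED_WAKE_TIME_SHIFT   16

/* Compute the register value the driver helper would write. */
static uint32_t dpkgc_latency_value(int vrr_enabled,
                                    uint32_t wm_latency_us,    /* result of the level-1-up search */
                                    uint32_t sagv_block_time_us)
{
        uint32_t max_latency, added_wake_time;

        if (vrr_enabled) {
                /* VRR: no deep PkgC latency limit, no added wake time. */
                max_latency = PKG_C_LATENCY_MASK;
                added_wake_time = 0;
        } else {
                /* Fixed refresh: highest valid latency from WM level 1 up,
                 * or all 1's if no level above 0 is valid. */
                max_latency = wm_latency_us ? wm_latency_us : PKG_C_LATENCY_MASK;
                added_wake_time = DSB_EXE_TIME + sagv_block_time_us;
        }

        return (max_latency & PKG_C_LATENCY_MASK) |
               ((added_wake_time << ADDED_WAKE_TIME_SHIFT) & ADDED_WAKE_TIME_MASK);
}

int main(void)
{
        printf("fixed: 0x%08x\n", dpkgc_latency_value(0, 45, 20)); /* 45us WM, 20us SAGV block */
        printf("vrr:   0x%08x\n", dpkgc_latency_value(1, 0, 0));
        return 0;
}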
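
To support that level-1-up search, skl_watermark_max_latency() gains an initial_wm_level parameter: it still scans from the highest watermark level downward for the first non-zero latency, but now stops at the requested level instead of level 0. A minimal sketch of that search over a plain latency table, with hypothetical test values rather than driver data:

#include <stdio.h>

/* Highest-level-first search for a non-zero latency, stopping at
 * initial_wm_level, mirroring the reworked skl_watermark_max_latency(). */
static unsigned int max_latency_from(const unsigned int *latency_us,
                                     int num_levels, int initial_wm_level)
{
        int level;

        for (level = num_levels - 1; level >= initial_wm_level; level--) {
                if (latency_us[level])
                        return latency_us[level];
        }

        return 0;
}

int main(void)
{
        /* Hypothetical per-level latencies where only WM0 is valid. */
        const unsigned int latency_us[] = { 2, 0, 0, 0 };

        printf("%u\n", max_latency_from(latency_us, 4, 0)); /* 2 */
        printf("%u\n", max_latency_from(latency_us, 4, 1)); /* 0: caller falls back to all 1's */
        return 0;
}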