Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/core/dc.c')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c  |  461
1 file changed, 232 insertions(+), 229 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index b51208f44..3c3d613c5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -34,6 +34,8 @@
#include "dce/dce_hwseq.h"
#include "resource.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -409,9 +411,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX)
- if (dc->optimized_required || dc->wm_optimized_required)
+ if (dc->optimized_required)
return false;
+ if (!memcmp(&stream->adjust, adjust, sizeof(*adjust)))
+ return true;
+
+ dc_exit_ips_for_hw_access(dc);
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
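
Taken together, this hunk gives dc_stream_adjust_vmin_vmax a no-op fast path ahead of any register access. A condensed sketch of the patched flow; the v_total_min copy and the per-pipe DRR programming at the end are assumed from the unmodified remainder of the function, which this hunk does not show:

	bool dc_stream_adjust_vmin_vmax(struct dc *dc,
					struct dc_stream_state *stream,
					struct dc_crtc_timing_adjust *adjust)
	{
		/* Refuse while a bandwidth optimize is still pending (DCN only). */
		if (dc->ctx->dce_version > DCE_VERSION_MAX)
			if (dc->optimized_required)
				return false;

		/* New: an identical adjust is reported as success with no HW work. */
		if (!memcmp(&stream->adjust, adjust, sizeof(*adjust)))
			return true;

		/* New: leave IPS only once we know registers will be written. */
		dc_exit_ips_for_hw_access(dc);

		stream->adjust.v_total_max = adjust->v_total_max;
		stream->adjust.v_total_mid = adjust->v_total_mid;
		stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
		/* assumed: copy v_total_min, then program DRR on each pipe
		 * driving this stream, as in the rest of the function */
		return true;
	}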
@@ -452,6 +459,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
int i = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -482,6 +491,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
bool ret = false;
struct crtc_position position;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe =
&dc->current_state->res_ctx.pipe_ctx[i];
@@ -601,6 +612,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
if (pipe == NULL)
return false;
+ dc_exit_ips_for_hw_access(dc);
+
/* By default, capture the full frame */
param.windowa_x_start = 0;
param.windowa_y_start = 0;
@@ -660,6 +673,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
struct pipe_ctx *pipe;
struct timing_generator *tg;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream)
@@ -684,6 +699,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
int i;
struct pipe_ctx *pipe_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -719,6 +736,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
if (option > DITHER_OPTION_MAX)
return;
+ dc_exit_ips_for_hw_access(stream->ctx->dc);
+
stream->dither_option = option;
memset(&params, 0, sizeof(params));
@@ -743,6 +762,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
pipes = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -760,6 +781,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -786,6 +809,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,
struct pipe_ctx *pipes_affected[MAX_PIPES];
int num_pipes_affected = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < num_streams; i++) {
struct dc_stream_state *stream = streams[i];
@@ -808,7 +833,7 @@ static void dc_destruct(struct dc *dc)
link_enc_cfg_init(dc, dc->current_state);
if (dc->current_state) {
- dc_release_state(dc->current_state);
+ dc_state_release(dc->current_state);
dc->current_state = NULL;
}
@@ -1020,29 +1045,27 @@ static bool dc_construct(struct dc *dc,
}
#endif
+ if (!create_links(dc, init_params->num_virtual_links))
+ goto fail;
+
+ /* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction.
+ */
+ if (!create_link_encoders(dc))
+ goto fail;
+
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
*/
- dc->current_state = dc_create_state(dc);
+ dc->current_state = dc_state_create(dc);
if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
goto fail;
}
- if (!create_links(dc, init_params->num_virtual_links))
- goto fail;
-
- /* Create additional DIG link encoder objects if fewer than the platform
- * supports were created during link construction.
- */
- if (!create_link_encoders(dc))
- goto fail;
-
- dc_resource_state_construct(dc, dc->current_state);
-
return true;
fail:
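
The net effect of this reordering: link objects now exist before the initial state is built, presumably because dc_state_create() takes over resource-state construction (note the dc_resource_state_construct() call deleted above). The resulting dc_construct sequence, sketched:

	/* 1. Links first: the initial state built below references them. */
	if (!create_links(dc, init_params->num_virtual_links))
		goto fail;

	/* 2. Extra DIG link encoders, if the platform supports more than
	 *    link construction created.
	 */
	if (!create_link_encoders(dc))
		goto fail;

	/* 3. State last: dc_state_create() copies dc->dml (initialized in
	 *    dc_create_resource_pool) and now evidently also constructs the
	 *    resource state, so the explicit construct call is gone.
	 */
	dc->current_state = dc_state_create(dc);
	if (!dc->current_state)
		goto fail;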
@@ -1085,7 +1108,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
}
}
-static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1105,9 +1128,9 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
- get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
- get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
}
}
}
@@ -1115,7 +1138,7 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
- struct dc_state *dangling_context = dc_create_state(dc);
+ struct dc_state *dangling_context = dc_state_create_current_copy(dc);
struct dc_state *current_ctx;
struct pipe_ctx *pipe;
struct timing_generator *tg;
@@ -1123,8 +1146,6 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
if (dangling_context == NULL)
return;
- dc_resource_state_copy_construct(dc->current_state, dangling_context);
-
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
@@ -1161,6 +1182,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
}
if (should_disable && old_stream) {
+ bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg;
/* When disabling plane for a phantom pipe, we must turn on the
@@ -1169,22 +1191,29 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* state that can result in underflow or hang when enabling it
* again for different use.
*/
- if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (is_phantom) {
if (tg->funcs->enable_crtc) {
int main_pipe_width, main_pipe_height;
+ struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
- main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
- main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+ main_pipe_width = old_paired_stream->dst.width;
+ main_pipe_height = old_paired_stream->dst.height;
if (dc->hwss.blank_phantom)
dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
tg->funcs->enable_crtc(tg);
}
}
- dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+
+ if (is_phantom)
+ dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
+ else
+ dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
if (dc->hwss.apply_ctx_for_surface) {
apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
@@ -1203,7 +1232,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* The OTG is set to disable on falling edge of VUPDATE so the plane disable
* will still get it's double buffer update.
*/
- if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (is_phantom) {
if (tg->funcs->disable_phantom_crtc)
tg->funcs->disable_phantom_crtc(tg);
}
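
The phantom handling in this function now follows one ordered dance, with the SubVP type and paired stream looked up through the new dc_state accessors instead of stream->mall_stream_config. A condensed recap with all names taken from the hunks above (main_pipe_width/height come from the paired main stream; the blank_phantom and disable_phantom_crtc hooks are optional):

	bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state,
						old_stream) == SUBVP_PHANTOM;

	if (is_phantom && tg->funcs->enable_crtc) {
		/* 1. Wake the phantom OTG so the plane disable gets its
		 *    double buffer update; blank it at the main pipe's size.
		 */
		if (dc->hwss.blank_phantom)
			dc->hwss.blank_phantom(dc, tg, main_pipe_width,
					       main_pipe_height);
		tg->funcs->enable_crtc(tg);
	}

	/* 2. Remove planes via the type-aware dc_state helpers. */
	if (is_phantom)
		dc_state_rem_all_phantom_planes_for_stream(dc, old_stream,
							   dangling_context, true);
	else
		dc_state_rem_all_planes_for_stream(dc, old_stream,
						   dangling_context);

	/* 3. After the disable latches on VUPDATE, stop the phantom OTG
	 *    again so it is not left running between uses.
	 */
	if (is_phantom && tg->funcs->disable_phantom_crtc)
		tg->funcs->disable_phantom_crtc(tg);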
@@ -1212,7 +1241,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
current_ctx = dc->current_state;
dc->current_state = dangling_context;
- dc_release_state(current_ctx);
+ dc_state_release(current_ctx);
}
static void disable_vbios_mode_if_required(
@@ -1276,6 +1305,54 @@ static void disable_vbios_mode_if_required(
}
}
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require reprogramming the blank pattern for each OPP */
+ wait_for_blank_complete(dc, context);
+}
+
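
Both helpers are polling barriers over the active (non-phantom) pipes. Their intended call site appears further down in this patch, in the commit path, roughly:

	/* Flips must drain first, then ODM double buffering must settle,
	 * because the optimized dispclk depends on the final ODM setup.
	 */
	wait_for_no_pipes_pending(dc, context);
	wait_for_odm_update_pending_complete(dc, context);
	dc->hwss.optimize_bandwidth(dc, context);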
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
@@ -1284,7 +1361,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
continue;
/* Timeout 100 ms */
@@ -1510,7 +1587,7 @@ static void program_timing_sync(
}
for (k = 0; k < group_size; k++) {
- struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
+ struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
status->timing_sync_info.group_id = num_group;
status->timing_sync_info.group_size = group_size;
@@ -1521,7 +1598,7 @@ static void program_timing_sync(
}
- /* remove any other pipes that are already been synced */
+ /* remove any other unblanked pipes as they have already been synced */
if (dc->config.use_pipe_ctx_sync_logic) {
/* check pipe's syncd to decide which pipe to be removed */
for (j = 1; j < group_size; j++) {
@@ -1534,6 +1611,7 @@ static void program_timing_sync(
pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
}
} else {
+ /* remove any other pipes by checking for a valid plane */
for (j = j + 1; j < group_size; j++) {
bool is_blanked;
@@ -1554,7 +1632,7 @@ static void program_timing_sync(
if (group_size > 1) {
if (sync_type == TIMING_SYNCHRONIZABLE) {
dc->hwss.enable_timing_synchronization(
- dc, group_index, group_size, pipe_set);
+ dc, ctx, group_index, group_size, pipe_set);
} else
if (sync_type == VBLANK_SYNCHRONIZABLE) {
dc->hwss.enable_vblanks_synchronization(
@@ -1759,6 +1837,8 @@ void dc_enable_stereo(
int i, j;
struct pipe_ctx *pipe;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (context != NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
@@ -1778,6 +1858,8 @@ void dc_enable_stereo(
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
+ dc_exit_ips_for_hw_access(dc);
+
enable_timing_multisync(dc, context);
program_timing_sync(dc, context);
}
@@ -1836,7 +1918,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* Check old context for SubVP */
- subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
if (subvp_prev_use)
break;
}
@@ -1962,6 +2044,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
wait_for_no_pipes_pending(dc, context);
+ /*
+ * optimized dispclk depends on ODM setup. Need to wait for ODM
+ * update pending complete before optimizing bandwidth.
+ */
+ wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
@@ -1994,9 +2081,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
old_state = dc->current_state;
dc->current_state = context;
- dc_release_state(old_state);
+ dc_state_release(old_state);
- dc_retain_state(dc->current_state);
+ dc_state_retain(dc->current_state);
return result;
}
@@ -2034,6 +2121,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
if (!streams_changed(dc, streams, stream_count))
return res;
+ dc_exit_ips_for_hw_access(dc);
+
DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
for (i = 0; i < stream_count; i++) {
@@ -2067,12 +2156,10 @@ enum dc_status dc_commit_streams(struct dc *dc,
if (handle_exit_odm2to1)
res = commit_minimal_transition_state(dc, dc->current_state);
- context = dc_create_state(dc);
+ context = dc_state_create_current_copy(dc);
if (!context)
goto context_alloc_fail;
- dc_resource_state_copy_construct_current(dc, context);
-
res = dc_validate_with_context(dc, set, stream_count, context, false);
if (res != DC_OK) {
BREAK_TO_DEBUGGER();
@@ -2087,7 +2174,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
if (dc_is_embedded_signal(streams[i]->signal)) {
- struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
+ struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
if (dc->hwss.is_abm_supported)
status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
@@ -2098,7 +2185,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
fail:
- dc_release_state(context);
+ dc_state_release(context);
context_alloc_fail:
@@ -2152,7 +2239,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
- if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
+ if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
continue;
/* Must set to false to start with, due to OR in update function */
@@ -2210,7 +2297,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
context->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
}
process_deferred_updates(dc);
@@ -2222,104 +2309,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
}
dc->optimized_required = false;
- dc->wm_optimized_required = false;
-}
-
-static void init_state(struct dc *dc, struct dc_state *context)
-{
- /* Each context must have their own instance of VBA and in order to
- * initialize and obtain IP and SOC the base DML instance from DC is
- * initially copied into every context
- */
- memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
-}
-
-struct dc_state *dc_create_state(struct dc *dc)
-{
- struct dc_state *context = kvzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
-
- if (!context)
- return NULL;
-
- init_state(dc, context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- if (dc->debug.using_dml2) {
- dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2);
- }
-#endif
- kref_init(&context->refcount);
-
- return context;
-}
-
-struct dc_state *dc_copy_state(struct dc_state *src_ctx)
-{
- int i, j;
- struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
-
- if (!new_ctx)
- return NULL;
- memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) {
- dc_release_state(new_ctx);
- return NULL;
- }
-#endif
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
-
- if (cur_pipe->top_pipe)
- cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
- if (cur_pipe->bottom_pipe)
- cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
- if (cur_pipe->prev_odm_pipe)
- cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
-
- if (cur_pipe->next_odm_pipe)
- cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
- }
-
- for (i = 0; i < new_ctx->stream_count; i++) {
- dc_stream_retain(new_ctx->streams[i]);
- for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
- dc_plane_state_retain(
- new_ctx->stream_status[i].plane_states[j]);
- }
-
- kref_init(&new_ctx->refcount);
-
- return new_ctx;
-}
-
-void dc_retain_state(struct dc_state *context)
-{
- kref_get(&context->refcount);
-}
-
-static void dc_state_free(struct kref *kref)
-{
- struct dc_state *context = container_of(kref, struct dc_state, refcount);
- dc_resource_state_destruct(context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- dml2_destroy(context->bw_ctx.dml2);
- context->bw_ctx.dml2 = 0;
-#endif
-
- kvfree(context);
-}
-
-void dc_release_state(struct dc_state *context)
-{
- kref_put(&context->refcount, dc_state_free);
}
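
This block does not disappear; it moves behind the dc_state_* API pulled in by the new dc_state.h/dc_state_priv.h includes at the top of the patch. Assuming the relocated implementation keeps the kref shape deleted here, the ownership rules the call sites in this patch rely on look like this (validate_fails stands in for any validation failure):

	struct dc_state *ctx = dc_state_create_copy(dc->current_state);
	if (!ctx)
		return false;		/* creation failed, nothing to release */

	if (validate_fails) {
		dc_state_release(ctx);	/* drop the creation reference */
		return false;
	}

	/* Commit: hand the creation reference to dc, drop the old state's. */
	old_state = dc->current_state;
	dc->current_state = ctx;
	dc_state_release(old_state);		/* kref_put(), frees at zero */
	dc_state_retain(dc->current_state);	/* kref_get() for a second user */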
bool dc_set_generic_gpio_for_stereo(bool enable,
@@ -2742,8 +2731,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
}
-
- dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -2951,9 +2938,6 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_active_fixed)
stream->vrr_active_fixed = *update->vrr_active_fixed;
- if (update->crtc_timing_adjust)
- stream->adjust = *update->crtc_timing_adjust;
-
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -2994,11 +2978,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config->num_slices_v != 0);
/* Use temporary context for validating new DSC config */
- struct dc_state *dsc_validate_context = dc_create_state(dc);
+ struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
if (dsc_validate_context) {
- dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
-
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
@@ -3007,7 +2989,7 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config = NULL;
}
- dc_release_state(dsc_validate_context);
+ dc_state_release(dsc_validate_context);
} else {
DC_ERROR("Failed to allocate new validate context for DSC change\n");
update->dsc_config = NULL;
@@ -3106,30 +3088,27 @@ static bool update_planes_and_stream_state(struct dc *dc,
new_planes[i] = srf_updates[i].surface;
/* initialize scratch memory for building context */
- context = dc_create_state(dc);
+ context = dc_state_create_copy(dc->current_state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return false;
}
- dc_resource_state_copy_construct(
- dc->current_state, context);
-
/* For each full update, remove all existing phantom pipes first.
* Ensures that we have enough pipes for newly added MPO planes
*/
- if (dc->res_pool->funcs->remove_phantom_pipes)
- dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
/*remove old surfaces from context */
- if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+ if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
BREAK_TO_DEBUGGER();
goto fail;
}
/* add surface to context */
- if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+ if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
BREAK_TO_DEBUGGER();
goto fail;
@@ -3154,19 +3133,6 @@ static bool update_planes_and_stream_state(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
- /* For phantom pipes we remove and create a new set of phantom pipes
- * for each full update (because we don't know if we'll need phantom
- * pipes until after the first round of validation). However, if validation
- * fails we need to keep the existing phantom pipes (because we don't update
- * the dc->current_state).
- *
- * The phantom stream/plane refcount is decremented for validation because
- * we assume it'll be removed (the free comes when the dc_state is freed),
- * but if validation fails we have to increment back the refcount so it's
- * consistent.
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -3187,7 +3153,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
return true;
fail:
- dc_release_state(context);
+ dc_state_release(context);
return false;
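
The deleted comment and retain_phantom_pipes() call above were compensating for a single-step removal that also dropped references. The new helpers used earlier in this function split the two concerns, which is what lets the failure path shrink; a sketch of the assumed contract, inferred from the helper names and call order:

	/* Step 1: detach phantom streams/planes from the candidate
	 * context only; dc->current_state keeps its own references.
	 */
	dc_state_remove_phantom_streams_and_planes(dc, context);

	/* Step 2: drop the references the candidate context held. */
	dc_state_release_phantom_streams_and_planes(dc, context);

	/* If validation of 'context' later fails, nothing was taken from
	 * dc->current_state, so no compensating retain is needed --
	 * dc_state_release(context) in the fail path cleans up everything.
	 */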
@@ -3488,18 +3454,26 @@ static void commit_planes_for_stream_fast(struct dc *dc,
{
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
+ struct dc_stream_status *stream_status = NULL;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
top_pipe_to_program = resource_get_otg_master_for_stream(
&context->res_ctx,
stream);
- if (dc->debug.visual_confirm) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ if (!top_pipe_to_program)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
}
}
@@ -3523,6 +3497,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
}
}
+ stream_status = dc_state_get_stream_status(context, stream);
+
build_dmub_cmd_list(dc,
srf_updates,
surface_count,
@@ -3535,7 +3511,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
context->dmub_cmd_count,
context->block_sequence,
&(context->block_sequence_steps),
- top_pipe_to_program);
+ top_pipe_to_program,
+ stream_status);
hwss_execute_sequence(dc,
context->block_sequence,
context->block_sequence_steps);
@@ -3548,7 +3525,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
@@ -3586,6 +3563,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
}
}
}
+ wait_for_odm_update_pending_complete(dc, dc_context);
}
static void commit_planes_for_stream(struct dc *dc,
@@ -3607,6 +3585,8 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL)
wait_for_outstanding_hw_updates(dc, context);
@@ -3631,7 +3611,7 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
// Check old context for SubVP
- subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
if (subvp_prev_use)
break;
}
@@ -3639,19 +3619,22 @@ static void commit_planes_for_stream(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
subvp_curr_use = true;
break;
}
}
- if (dc->debug.visual_confirm)
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
}
+ }
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
@@ -3918,7 +3901,9 @@ static void commit_planes_for_stream(struct dc *dc,
* programming has completed (we turn on phantom OTG in order
* to complete the plane disable for phantom pipes).
*/
- dc->hwss.apply_ctx_to_hw(dc, context);
+
+ if (dc->hwss.disable_phantom_streams)
+ dc->hwss.disable_phantom_streams(dc, context);
}
if (update_type != UPDATE_TYPE_FAST)
@@ -4024,7 +4009,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
subvp_active = true;
break;
}
@@ -4061,7 +4046,7 @@ struct pipe_split_policy_backup {
static void release_minimal_transition_state(struct dc *dc,
struct dc_state *context, struct pipe_split_policy_backup *policy)
{
- dc_release_state(context);
+ dc_state_release(context);
/* restore previous pipe split and odm policy */
if (!dc->config.is_vmin_only_asic)
dc->debug.pipe_split_policy = policy->mpc_policy;
@@ -4072,7 +4057,7 @@ static void release_minimal_transition_state(struct dc *dc,
static struct dc_state *create_minimal_transition_state(struct dc *dc,
struct dc_state *base_context, struct pipe_split_policy_backup *policy)
{
- struct dc_state *minimal_transition_context = dc_create_state(dc);
+ struct dc_state *minimal_transition_context = NULL;
unsigned int i, j;
if (!dc->config.is_vmin_only_asic) {
@@ -4084,7 +4069,9 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
policy->subvp_policy = dc->debug.force_disable_subvp;
dc->debug.force_disable_subvp = true;
- dc_resource_state_copy_construct(base_context, minimal_transition_context);
+ minimal_transition_context = dc_state_create_copy(base_context);
+ if (!minimal_transition_context)
+ return NULL;
/* commit minimal state */
if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
@@ -4116,7 +4103,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
bool success = false;
struct dc_state *minimal_transition_context;
struct pipe_split_policy_backup policy;
- struct mall_temp_config mall_temp_config;
/* commit based on new context */
/* Since all phantom pipes are removed in full validation,
@@ -4125,8 +4111,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
* pipe as subvp/phantom will be cleared (dc copy constructor
* creates a shallow copy).
*/
- if (dc->res_pool->funcs->save_mall_state)
- dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
minimal_transition_context = create_minimal_transition_state(dc,
context, &policy);
if (minimal_transition_context) {
@@ -4139,16 +4123,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
}
release_minimal_transition_state(dc, minimal_transition_context, &policy);
- if (dc->res_pool->funcs->restore_mall_state)
- dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
- /* If we do a minimal transition with plane removal and the context
- * has subvp we also have to retain back the phantom stream / planes
- * since the refcount is decremented as part of the min transition
- * (we commit a state with no subvp, so the phantom streams / planes
- * had to be removed).
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, context);
}
if (!success) {
@@ -4216,7 +4190,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
subvp_in_use = true;
break;
}
@@ -4403,8 +4377,7 @@ static bool full_update_required(struct dc *dc,
stream_update->mst_bw_update ||
stream_update->func_shaper ||
stream_update->lut3d_func ||
- stream_update->pending_test_pattern ||
- stream_update->crtc_timing_adjust))
+ stream_update->pending_test_pattern))
return true;
if (stream) {
@@ -4484,7 +4457,6 @@ bool dc_update_planes_and_stream(struct dc *dc,
struct dc_state *context;
enum surface_update_type update_type;
int i;
- struct mall_temp_config mall_temp_config;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
@@ -4495,6 +4467,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
bool is_plane_addition = 0;
bool is_fast_update_only;
+ dc_exit_ips_for_hw_access(dc);
+
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
surface_count, stream_update, stream);
@@ -4528,23 +4502,10 @@ bool dc_update_planes_and_stream(struct dc *dc,
* pipe as subvp/phantom will be cleared (dc copy constructor
* creates a shallow copy).
*/
- if (dc->res_pool->funcs->save_mall_state)
- dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
if (!commit_minimal_transition_state(dc, context)) {
- dc_release_state(context);
+ dc_state_release(context);
return false;
}
- if (dc->res_pool->funcs->restore_mall_state)
- dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
-
- /* If we do a minimal transition with plane removal and the context
- * has subvp we also have to retain back the phantom stream / planes
- * since the refcount is decremented as part of the min transition
- * (we commit a state with no subvp, so the phantom streams / planes
- * had to be removed).
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, context);
update_type = UPDATE_TYPE_FULL;
}
@@ -4601,7 +4562,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
struct dc_state *old = dc->current_state;
dc->current_state = context;
- dc_release_state(old);
+ dc_state_release(old);
// clear any forced full updates
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4628,6 +4589,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
int i, j;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+ dc_exit_ips_for_hw_access(dc);
+
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -4660,14 +4623,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
- context = dc_create_state(dc);
+ context = dc_state_create_copy(state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;
}
- dc_resource_state_copy_construct(state, context);
-
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4706,7 +4667,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
DC_ERROR("Mode validation failed for stream update!\n");
- dc_release_state(context);
+ dc_state_release(context);
return;
}
}
@@ -4739,7 +4700,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_state *old = dc->current_state;
dc->current_state = context;
- dc_release_state(old);
+ dc_state_release(old);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -4812,7 +4773,9 @@ void dc_set_power_state(
switch (power_state) {
case DC_ACPI_CM_POWER_STATE_D0:
- dc_resource_state_construct(dc, dc->current_state);
+ dc_state_construct(dc, dc->current_state);
+
+ dc_exit_ips_for_hw_access(dc);
dc_z10_restore(dc);
@@ -4827,7 +4790,7 @@ void dc_set_power_state(
default:
ASSERT(dc->current_state->stream_count == 0);
- dc_resource_state_destruct(dc->current_state);
+ dc_state_destruct(dc->current_state);
break;
}
@@ -4904,6 +4867,38 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
return true;
}
+/* enable/disable eDP Replay without specifying a stream for eDP */
+bool dc_set_replay_allow_active(struct dc *dc, bool active)
+{
+ int i;
+ bool allow_active;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ struct dc_link *link;
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ link = stream->link;
+ if (!link)
+ continue;
+
+ if (link->replay_settings.replay_feature_enabled) {
+ if (active && !link->replay_settings.replay_allow_active) {
+ allow_active = true;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ false, false, NULL))
+ return false;
+ } else if (!active && link->replay_settings.replay_allow_active) {
+ allow_active = false;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ true, false, NULL))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
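
A caller-side sketch (hypothetical usage; the wait/force argument pairs passed to dc_link_set_replay_allow_active above come straight from this hunk):

	/* Park Replay on every eDP link before a global reconfiguration,
	 * restore it afterwards.
	 */
	if (!dc_set_replay_allow_active(dc, false))
		DC_ERROR("failed to disable eDP Replay\n");

	/* ... reprogram the display path ... */

	if (!dc_set_replay_allow_active(dc, true))
		DC_ERROR("failed to re-enable eDP Replay\n");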
void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
if (dc->debug.disable_idle_power_optimizations)
@@ -4923,6 +4918,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
+void dc_exit_ips_for_hw_access(struct dc *dc)
+{
+ if (dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, false);
+}
+
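
This tiny wrapper is the hook sprinkled through the rest of the patch: on parts with IPS (Idle Power State) support it forces idle optimizations off before register access; elsewhere it is a no-op. The canonical call pattern, as in commit_planes_for_stream above:

	/* Wake the hardware before touching any display registers. */
	dc_exit_ips_for_hw_access(dc);
	dc_z10_restore(dc);
	/* ... OTG/OPP/HUBP programming is now safe ... */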
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
if (dc->debug.disable_idle_power_optimizations)
@@ -5443,6 +5444,8 @@ bool dc_abm_save_restore(
struct dc_link *link = stream->sink->link;
struct dc_link *edp_links[MAX_NUM_EDP];
+ if (link->replay_settings.replay_feature_enabled)
+ return false;
/*find primary pipe associated with stream*/
for (i = 0; i < MAX_PIPES; i++) {