Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/core/dc.c')
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 462
1 file changed, 359 insertions(+), 103 deletions(-)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 8cdf380bf..bbdeda489 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -24,6 +24,8 @@
#include "dm_services.h"
+#include "amdgpu.h"
+
#include "dc.h"
#include "core_status.h"
@@ -75,6 +77,8 @@
#include "hw_sequencer_private.h"
+#include "dml2/dml2_internal_types.h"
+
#include "dce/dmub_outbox.h"
#define CTX \
@@ -515,7 +519,7 @@ dc_stream_forward_dmub_crc_window(struct dc_dmub_srv *dmub_srv,
cmd.secure_display.roi_info.y_end = rect->y + rect->height;
}
- dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
static inline void
@@ -828,6 +832,7 @@ static void dc_destruct(struct dc *dc)
if (dc->ctx->created_bios)
dal_bios_parser_destroy(&dc->ctx->dc_bios);
+ kfree(dc->ctx->logger);
dc_perf_trace_destroy(&dc->ctx->perf_trace);
kfree(dc->ctx);
@@ -868,8 +873,18 @@ static bool dc_construct_ctx(struct dc *dc,
dc_ctx->dce_environment = init_params->dce_environment;
dc_ctx->dcn_reg_offsets = init_params->dcn_reg_offsets;
dc_ctx->nbio_reg_offsets = init_params->nbio_reg_offsets;
+ dc_ctx->clk_reg_offsets = init_params->clk_reg_offsets;
/* Create logger */
+ dc_ctx->logger = kmalloc(sizeof(*dc_ctx->logger), GFP_KERNEL);
+
+ if (!dc_ctx->logger) {
+ kfree(dc_ctx);
+ return false;
+ }
+
+ dc_ctx->logger->dev = adev_to_drm(init_params->driver);
+ dc->dml.logger = dc_ctx->logger;
dc_ctx->dce_version = resource_parse_asic_id(init_params->asic_id);
@@ -1325,6 +1340,7 @@ struct dc *dc_create(const struct dc_init_data *init_params)
dc->dcn_reg_offsets = init_params->dcn_reg_offsets;
dc->nbio_reg_offsets = init_params->nbio_reg_offsets;
+ dc->clk_reg_offsets = init_params->clk_reg_offsets;
/* Populate versioning information */
dc->versions.dc_ver = DC_VER;
@@ -1948,6 +1964,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
wait_for_no_pipes_pending(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
+ /* Need to do otg sync again as otg could be out of sync due to otg
+ * workaround applied during clock update
+ */
+ dc_trigger_sync(dc, context);
}
if (dc->hwss.update_dsc_pg)
@@ -2224,6 +2244,11 @@ struct dc_state *dc_create_state(struct dc *dc)
init_state(dc, context);
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (dc->debug.using_dml2) {
+ dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2);
+ }
+#endif
kref_init(&context->refcount);
return context;
@@ -2233,11 +2258,25 @@ struct dc_state *dc_copy_state(struct dc_state *src_ctx)
{
int i, j;
struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
+#ifdef CONFIG_DRM_AMD_DC_FP
+ struct dml2_context *dml2 = NULL;
+#endif
if (!new_ctx)
return NULL;
memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (new_ctx->bw_ctx.dml2) {
+ dml2 = kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
+ if (!dml2)
+ return NULL;
+
+ memcpy(dml2, src_ctx->bw_ctx.dml2, sizeof(struct dml2_context));
+ new_ctx->bw_ctx.dml2 = dml2;
+ }
+#endif
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
@@ -2276,6 +2315,12 @@ static void dc_state_free(struct kref *kref)
{
struct dc_state *context = container_of(kref, struct dc_state, refcount);
dc_resource_state_destruct(context);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dml2_destroy(context->bw_ctx.dml2);
+ context->bw_ctx.dml2 = 0;
+#endif
+
kvfree(context);
}
@@ -2541,6 +2586,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (u->gamut_remap_matrix)
update_flags->bits.gamut_remap_change = 1;
+ if (u->blend_tf)
+ update_flags->bits.gamma_change = 1;
+
if (u->gamma) {
enum surface_pixel_format format = SURFACE_PIXEL_FORMAT_GRPH_BEGIN;
@@ -2974,6 +3022,34 @@ static void copy_stream_update_to_stream(struct dc *dc,
}
}
+static void backup_plane_states_for_stream(
+ struct dc_plane_state plane_states[MAX_SURFACE_NUM],
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+
+ if (!status)
+ return;
+
+ for (i = 0; i < status->plane_count; i++)
+ plane_states[i] = *status->plane_states[i];
+}
+
+static void restore_plane_states_for_stream(
+ struct dc_plane_state plane_states[MAX_SURFACE_NUM],
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct dc_stream_status *status = dc_stream_get_status(stream);
+
+ if (!status)
+ return;
+
+ for (i = 0; i < status->plane_count; i++)
+ *status->plane_states[i] = plane_states[i];
+}
+
static bool update_planes_and_stream_state(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -2997,7 +3073,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
}
context = dc->current_state;
-
+ backup_plane_states_for_stream(dc->current_state->scratch.plane_states, stream);
update_type = dc_check_update_surfaces_for_stream(
dc, srf_updates, surface_count, stream_update, stream_status);
@@ -3027,6 +3103,9 @@ static bool update_planes_and_stream_state(struct dc *dc,
if (update_type >= update_surface_trace_level)
update_surface_trace(dc, srf_updates, surface_count);
+ for (i = 0; i < surface_count; i++)
+ copy_surface_update_to_plane(srf_updates[i].surface, &srf_updates[i]);
+
if (update_type >= UPDATE_TYPE_FULL) {
struct dc_plane_state *new_planes[MAX_SURFACES] = {0};
@@ -3068,8 +3147,6 @@ static bool update_planes_and_stream_state(struct dc *dc,
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *surface = srf_updates[i].surface;
- copy_surface_update_to_plane(surface, &srf_updates[i]);
-
if (update_type >= UPDATE_TYPE_MED) {
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
@@ -3100,10 +3177,19 @@ static bool update_planes_and_stream_state(struct dc *dc,
BREAK_TO_DEBUGGER();
goto fail;
}
+
+ for (i = 0; i < context->stream_count; i++) {
+ struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
+ context->streams[i]);
+
+ if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
+ resource_build_test_pattern_params(&context->res_ctx, otg_master);
+ }
}
*new_context = context;
*new_update_type = update_type;
+ backup_plane_states_for_stream(context->scratch.plane_states, stream);
return true;
@@ -3304,7 +3390,7 @@ void dc_dmub_update_dirty_rect(struct dc *dc,
update_dirty_rect->panel_inst = panel_inst;
update_dirty_rect->pipe_idx = j;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
}
}
@@ -3475,7 +3561,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
*/
int pipe_idx;
int opp_inst;
- int opp_count = dc->res_pool->pipe_count;
+ int opp_count = dc->res_pool->res_cap->num_opp;
struct hubp *hubp;
int mpcc_inst;
const struct pipe_ctx *pipe_ctx;
@@ -3496,7 +3582,8 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
mpcc_inst = hubp->inst;
// MPCC inst is equal to pipe index in practice
for (opp_inst = 0; opp_inst < opp_count; opp_inst++) {
- if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) {
+ if ((dc->res_pool->opps[opp_inst] != NULL) &&
+ (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst])) {
dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst);
dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst] = false;
break;
@@ -3518,6 +3605,7 @@ static void commit_planes_for_stream(struct dc *dc,
bool should_lock_all_pipes = (update_type != UPDATE_TYPE_FAST);
bool subvp_prev_use = false;
bool subvp_curr_use = false;
+ uint8_t current_stream_mask = 0;
// Once we apply the new subvp context to hardware it won't be in the
// dc->current_state anymore, so we have to cache it before we apply
@@ -3542,7 +3630,7 @@ static void commit_planes_for_stream(struct dc *dc,
top_pipe_to_program = resource_get_otg_master_for_stream(
&context->res_ctx,
stream);
-
+ ASSERT(top_pipe_to_program != NULL);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -3867,6 +3955,12 @@ static void commit_planes_for_stream(struct dc *dc,
if (pipe_ctx->stream_res.tg->funcs->program_manual_trigger)
pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
}
+
+ current_stream_mask = get_stream_mask(dc, context);
+ if (current_stream_mask != context->stream_mask) {
+ context->stream_mask = current_stream_mask;
+ dc_dmub_srv_notify_stream_mask(dc->ctx->dmub_srv, current_stream_mask);
+ }
}
/**
@@ -3874,6 +3968,7 @@ static void commit_planes_for_stream(struct dc *dc,
*
* @dc: Used to get the current state status
* @stream: Target stream, which we want to remove the attached planes
+ * @srf_updates: Array of surface updates
* @surface_count: Number of surface update
* @is_plane_addition: [in] Fill out with true if it is a plane addition case
*
@@ -3890,6 +3985,7 @@ static void commit_planes_for_stream(struct dc *dc,
*/
static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
struct dc_stream_state *stream,
+ struct dc_surface_update *srf_updates,
int surface_count,
bool *is_plane_addition)
{
@@ -3960,6 +4056,127 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
return force_minimal_pipe_splitting;
}
+struct pipe_split_policy_backup {
+ bool dynamic_odm_policy;
+ bool subvp_policy;
+ enum pipe_split_policy mpc_policy;
+};
+
+static void release_minimal_transition_state(struct dc *dc,
+ struct dc_state *context, struct pipe_split_policy_backup *policy)
+{
+ dc_release_state(context);
+ /* restore previous pipe split and odm policy */
+ if (!dc->config.is_vmin_only_asic)
+ dc->debug.pipe_split_policy = policy->mpc_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = policy->dynamic_odm_policy;
+ dc->debug.force_disable_subvp = policy->subvp_policy;
+}
+
+static struct dc_state *create_minimal_transition_state(struct dc *dc,
+ struct dc_state *base_context, struct pipe_split_policy_backup *policy)
+{
+ struct dc_state *minimal_transition_context = dc_create_state(dc);
+ unsigned int i, j;
+
+ if (!dc->config.is_vmin_only_asic) {
+ policy->mpc_policy = dc->debug.pipe_split_policy;
+ dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
+ }
+ policy->dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
+ dc->debug.enable_single_display_2to1_odm_policy = false;
+ policy->subvp_policy = dc->debug.force_disable_subvp;
+ dc->debug.force_disable_subvp = true;
+
+ dc_resource_state_copy_construct(base_context, minimal_transition_context);
+
+ /* commit minimal state */
+ if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
+ for (i = 0; i < minimal_transition_context->stream_count; i++) {
+ struct dc_stream_status *stream_status = &minimal_transition_context->stream_status[i];
+
+ for (j = 0; j < stream_status->plane_count; j++) {
+ struct dc_plane_state *plane_state = stream_status->plane_states[j];
+
+ /* force vsync flip when reconfiguring pipes to prevent underflow
+ * and corruption
+ */
+ plane_state->flip_immediate = false;
+ }
+ }
+ } else {
+ /* this should never happen */
+ release_minimal_transition_state(dc, minimal_transition_context, policy);
+ BREAK_TO_DEBUGGER();
+ minimal_transition_context = NULL;
+ }
+ return minimal_transition_context;
+}
+
+static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
+ struct dc_state *context,
+ struct dc_stream_state *stream)
+{
+ bool success = false;
+ struct dc_state *minimal_transition_context;
+ struct pipe_split_policy_backup policy;
+ struct mall_temp_config mall_temp_config;
+
+ /* commit based on new context */
+ /* Since all phantom pipes are removed in full validation,
+ * we have to save and restore the subvp/mall config when
+ * we do a minimal transition since the flags marking the
+ * pipe as subvp/phantom will be cleared (dc copy constructor
+ * creates a shallow copy).
+ */
+ if (dc->res_pool->funcs->save_mall_state)
+ dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
+ minimal_transition_context = create_minimal_transition_state(dc,
+ context, &policy);
+ if (minimal_transition_context) {
+ if (dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, minimal_transition_context) &&
+ dc->hwss.is_pipe_topology_transition_seamless(
+ dc, minimal_transition_context, context)) {
+ DC_LOG_DC("%s base = new state\n", __func__);
+
+ success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
+ }
+ release_minimal_transition_state(dc, minimal_transition_context, &policy);
+ if (dc->res_pool->funcs->restore_mall_state)
+ dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
+ /* If we do a minimal transition with plane removal and the context
+ * has subvp we also have to retain back the phantom stream / planes
+ * since the refcount is decremented as part of the min transition
+ * (we commit a state with no subvp, so the phantom streams / planes
+ * had to be removed).
+ */
+ if (dc->res_pool->funcs->retain_phantom_pipes)
+ dc->res_pool->funcs->retain_phantom_pipes(dc, context);
+ }
+
+ if (!success) {
+ /* commit based on current context */
+ restore_plane_states_for_stream(dc->current_state->scratch.plane_states, stream);
+ minimal_transition_context = create_minimal_transition_state(dc,
+ dc->current_state, &policy);
+ if (minimal_transition_context) {
+ if (dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, minimal_transition_context) &&
+ dc->hwss.is_pipe_topology_transition_seamless(
+ dc, minimal_transition_context, context)) {
+ DC_LOG_DC("%s base = current state\n", __func__);
+ success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
+ }
+ release_minimal_transition_state(dc, minimal_transition_context, &policy);
+ }
+ restore_plane_states_for_stream(context->scratch.plane_states, stream);
+ }
+
+ ASSERT(success);
+ return success;
+}
+
/**
* commit_minimal_transition_state - Create a transition pipe split state
*
@@ -3981,23 +4198,14 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
static bool commit_minimal_transition_state(struct dc *dc,
struct dc_state *transition_base_context)
{
- struct dc_state *transition_context = dc_create_state(dc);
- enum pipe_split_policy tmp_mpc_policy = 0;
- bool temp_dynamic_odm_policy = 0;
- bool temp_subvp_policy = 0;
+ struct dc_state *transition_context;
+ struct pipe_split_policy_backup policy;
enum dc_status ret = DC_ERROR_UNEXPECTED;
unsigned int i, j;
unsigned int pipe_in_use = 0;
bool subvp_in_use = false;
bool odm_in_use = false;
- if (!transition_context)
- return false;
- /* Setup:
- * Store the current ODM and MPC config in some temp variables to be
- * restored after we commit the transition state.
- */
-
/* check current pipes in use*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
@@ -4022,10 +4230,10 @@ static bool commit_minimal_transition_state(struct dc *dc,
* pipe, we must use the minimal transition.
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = &transition_base_context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->next_odm_pipe) {
- odm_in_use = true;
+ if (resource_is_pipe_type(pipe, OTG_MASTER)) {
+ odm_in_use = resource_get_odm_slice_count(pipe) > 1;
break;
}
}
@@ -4038,54 +4246,23 @@ static bool commit_minimal_transition_state(struct dc *dc,
* Reduce the scenarios to use dc_commit_state_no_check in the stage of flip. Especially
* enter/exit MPO when DCN still have enough resources.
*/
- if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use) {
- dc_release_state(transition_context);
+ if (pipe_in_use != dc->res_pool->pipe_count && !subvp_in_use && !odm_in_use)
return true;
- }
-
- if (!dc->config.is_vmin_only_asic) {
- tmp_mpc_policy = dc->debug.pipe_split_policy;
- dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
- }
-
- temp_dynamic_odm_policy = dc->debug.enable_single_display_2to1_odm_policy;
- dc->debug.enable_single_display_2to1_odm_policy = false;
-
- temp_subvp_policy = dc->debug.force_disable_subvp;
- dc->debug.force_disable_subvp = true;
-
- dc_resource_state_copy_construct(transition_base_context, transition_context);
-
- /* commit minimal state */
- if (dc->res_pool->funcs->validate_bandwidth(dc, transition_context, false)) {
- for (i = 0; i < transition_context->stream_count; i++) {
- struct dc_stream_status *stream_status = &transition_context->stream_status[i];
-
- for (j = 0; j < stream_status->plane_count; j++) {
- struct dc_plane_state *plane_state = stream_status->plane_states[j];
- /* force vsync flip when reconfiguring pipes to prevent underflow
- * and corruption
- */
- plane_state->flip_immediate = false;
- }
- }
+ DC_LOG_DC("%s base = %s state, reason = %s\n", __func__,
+ dc->current_state == transition_base_context ? "current" : "new",
+ subvp_in_use ? "Subvp In Use" :
+ odm_in_use ? "ODM in Use" :
+ dc->debug.pipe_split_policy != MPC_SPLIT_AVOID ? "MPC in Use" :
+ "Unknown");
+ transition_context = create_minimal_transition_state(dc,
+ transition_base_context, &policy);
+ if (transition_context) {
ret = dc_commit_state_no_check(dc, transition_context);
+ release_minimal_transition_state(dc, transition_context, &policy);
}
- /* always release as dc_commit_state_no_check retains in good case */
- dc_release_state(transition_context);
-
- /* TearDown:
- * Restore original configuration for ODM and MPO.
- */
- if (!dc->config.is_vmin_only_asic)
- dc->debug.pipe_split_policy = tmp_mpc_policy;
-
- dc->debug.enable_single_display_2to1_odm_policy = temp_dynamic_odm_policy;
- dc->debug.force_disable_subvp = temp_subvp_policy;
-
if (ret != DC_OK) {
/* this should never happen */
BREAK_TO_DEBUGGER();
@@ -4198,7 +4375,6 @@ static bool full_update_required(struct dc *dc,
srf_updates[i].in_transfer_func ||
srf_updates[i].func_shaper ||
srf_updates[i].lut3d_func ||
- srf_updates[i].blend_tf ||
srf_updates[i].surface->force_full_update ||
(srf_updates[i].flip_addr &&
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
@@ -4257,6 +4433,53 @@ static bool fast_update_only(struct dc *dc,
&& !full_update_required(dc, srf_updates, surface_count, stream_update, stream);
}
+static bool should_commit_minimal_transition_for_windowed_mpo_odm(struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *context)
+{
+ struct pipe_ctx *cur_pipe, *new_pipe;
+ bool cur_is_odm_in_use, new_is_odm_in_use;
+ struct dc_stream_status *cur_stream_status = stream_get_status(dc->current_state, stream);
+ struct dc_stream_status *new_stream_status = stream_get_status(context, stream);
+
+ if (!dc->debug.enable_single_display_2to1_odm_policy ||
+ !dc->config.enable_windowed_mpo_odm)
+ /* skip the check if windowed MPO ODM or dynamic ODM is turned
+ * off.
+ */
+ return false;
+
+ if (context == dc->current_state)
+ /* skip the check for fast update */
+ return false;
+
+ if (new_stream_status->plane_count != cur_stream_status->plane_count)
+ /* plane count changed, not a plane scaling update so not the
+ * case we are looking for
+ */
+ return false;
+
+ cur_pipe = resource_get_otg_master_for_stream(&dc->current_state->res_ctx, stream);
+ new_pipe = resource_get_otg_master_for_stream(&context->res_ctx, stream);
+ if (!cur_pipe || !new_pipe)
+ return false;
+ cur_is_odm_in_use = resource_get_odm_slice_count(cur_pipe) > 1;
+ new_is_odm_in_use = resource_get_odm_slice_count(new_pipe) > 1;
+ if (cur_is_odm_in_use == new_is_odm_in_use)
+ /* ODM state isn't changed, not the case we are looking for */
+ return false;
+
+ if (dc->hwss.is_pipe_topology_transition_seamless &&
+ dc->hwss.is_pipe_topology_transition_seamless(
+ dc, dc->current_state, context))
+ /* transition can be achieved without the need for committing
+ * minimal transition state first
+ */
+ return false;
+
+ return true;
+}
+
bool dc_update_planes_and_stream(struct dc *dc,
struct dc_surface_update *srf_updates, int surface_count,
struct dc_stream_state *stream,
@@ -4274,11 +4497,15 @@ bool dc_update_planes_and_stream(struct dc *dc,
*/
bool force_minimal_pipe_splitting = 0;
bool is_plane_addition = 0;
+ bool is_fast_update_only;
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
+ is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
+ surface_count, stream_update, stream);
force_minimal_pipe_splitting = could_mpcc_tree_change_for_active_pipes(
dc,
stream,
+ srf_updates,
surface_count,
&is_plane_addition);
@@ -4325,9 +4552,21 @@ bool dc_update_planes_and_stream(struct dc *dc,
update_type = UPDATE_TYPE_FULL;
}
+ /* when windowed MPO ODM is supported, we need to handle a special case
+ * where we can transition between ODM combine and MPC combine due to
+ * plane scaling update. This transition will require us to commit
+ * minimal transition state. The condition to trigger this update can't
+ * be predicted by could_mpcc_tree_change_for_active_pipes because we
+ * can only determine it after DML validation. Therefore we can't rely
+ * on the existing commit minimal transition state sequence. Instead
+ * we have to add additional handling here to handle this transition
+ * with its own special sequence.
+ */
+ if (should_commit_minimal_transition_for_windowed_mpo_odm(dc, stream, context))
+ commit_minimal_transition_state_for_windowed_mpo_odm(dc,
+ context, stream);
update_seamless_boot_flags(dc, context, surface_count, stream);
- if (fast_update_only(dc, fast_update, srf_updates, surface_count, stream_update, stream) &&
- !dc->debug.enable_legacy_fast_update) {
+ if (is_fast_update_only && !dc->debug.enable_legacy_fast_update) {
commit_planes_for_stream_fast(dc,
srf_updates,
surface_count,
@@ -4340,7 +4579,6 @@ bool dc_update_planes_and_stream(struct dc *dc,
dc->hwss.is_pipe_topology_transition_seamless &&
!dc->hwss.is_pipe_topology_transition_seamless(
dc, dc->current_state, context)) {
-
DC_LOG_ERROR("performing non-seamless pipe topology transition with surface only update!\n");
BREAK_TO_DEBUGGER();
}
@@ -4573,9 +4811,6 @@ void dc_set_power_state(
struct dc *dc,
enum dc_acpi_cm_power_state power_state)
{
- struct kref refcount;
- struct display_mode_lib *dml;
-
if (!dc->current_state)
return;
@@ -4595,30 +4830,8 @@ void dc_set_power_state(
break;
default:
ASSERT(dc->current_state->stream_count == 0);
- /* Zero out the current context so that on resume we start with
- * clean state, and dc hw programming optimizations will not
- * cause any trouble.
- */
- dml = kzalloc(sizeof(struct display_mode_lib),
- GFP_KERNEL);
-
- ASSERT(dml);
- if (!dml)
- return;
-
- /* Preserve refcount */
- refcount = dc->current_state->refcount;
- /* Preserve display mode lib */
- memcpy(dml, &dc->current_state->bw_ctx.dml, sizeof(struct display_mode_lib));
dc_resource_state_destruct(dc->current_state);
- memset(dc->current_state, 0,
- sizeof(*dc->current_state));
-
- dc->current_state->refcount = refcount;
- dc->current_state->bw_ctx.dml = *dml;
-
- kfree(dml);
break;
}
@@ -4700,6 +4913,9 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
if (dc->debug.disable_idle_power_optimizations)
return;
+ if (dc->caps.ips_support && (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
+ return;
+
if (dc->clk_mgr != NULL && dc->clk_mgr->funcs->is_smu_present)
if (!dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr))
return;
@@ -4711,6 +4927,26 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
+bool dc_dmub_is_ips_idle_state(struct dc *dc)
+{
+ uint32_t idle_state = 0;
+
+ if (dc->debug.disable_idle_power_optimizations)
+ return false;
+
+ if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
+ return false;
+
+ if (dc->hwss.get_idle_state)
+ idle_state = dc->hwss.get_idle_state(dc);
+
+ if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
+ !(idle_state & DMUB_IPS2_ALLOW_MASK))
+ return true;
+
+ return false;
+}
+
/* set min and max memory clock to lowest and highest DPM level, respectively */
void dc_unlock_memory_clock_frequency(struct dc *dc)
{
@@ -4983,7 +5219,7 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
);
}
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -5037,7 +5273,7 @@ bool dc_process_dmub_set_config_async(struct dc *dc,
cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
- if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
+ if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) {
/* command is not processed by dmub */
notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
return is_cmd_complete;
@@ -5080,7 +5316,7 @@ enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
- if (!dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ if (!dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
/* command is not processed by dmub */
return DC_ERROR_UNEXPECTED;
@@ -5118,7 +5354,7 @@ void dc_process_dmub_dpia_hpd_int_enable(const struct dc *dc,
cmd.dpia_hpd_int_enable.header.type = DMUB_CMD__DPIA_HPD_INT_ENABLE;
cmd.dpia_hpd_int_enable.enable = hpd_int_enable;
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
DC_LOG_DEBUG("%s: hpd_int_enable(%d)\n", __func__, hpd_int_enable);
}
@@ -5253,25 +5489,28 @@ bool dc_abm_save_restore(
void dc_query_current_properties(struct dc *dc, struct dc_current_properties *properties)
{
unsigned int i;
- bool subvp_in_use = false;
+ bool subvp_sw_cursor_req = false;
for (i = 0; i < dc->current_state->stream_count; i++) {
- if (dc->current_state->streams[i]->mall_stream_config.type != SUBVP_NONE) {
- subvp_in_use = true;
+ if (check_subvp_sw_cursor_fallback_req(dc, dc->current_state->streams[i])) {
+ subvp_sw_cursor_req = true;
break;
}
}
- properties->cursor_size_limit = subvp_in_use ? 64 : dc->caps.max_cursor_size;
+ properties->cursor_size_limit = subvp_sw_cursor_req ? 64 : dc->caps.max_cursor_size;
}
/**
- *****************************************************************************
* dc_set_edp_power() - DM controls eDP power to be ON/OFF
*
* Called when DM wants to power on/off eDP.
* Only work on links with flag skip_implict_edp_power_control is set.
*
- *****************************************************************************
+ * @dc: Current DC state
+ * @edp_link: a link with eDP connector signal type
+ * @powerOn: power on/off eDP
+ *
+ * Return: void
*/
void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
bool powerOn)
@@ -5285,3 +5524,20 @@ void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link,
edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn);
}
+/*
+ *****************************************************************************
+ * dc_get_power_profile_for_dc_state() - extracts power profile from dc state
+ *
+ * Called when DM wants to make power policy decisions based on dc_state
+ *
+ *****************************************************************************
+ */
+struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context)
+{
+ struct dc_power_profile profile = { 0 };
+
+ profile.power_level += !context->bw_ctx.bw.dcn.clk.p_state_change_support;
+
+ return profile;
+}
+