From ace9429bb58fd418f0c81d4c2835699bddf6bde6 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Thu, 11 Apr 2024 10:27:49 +0200
Subject: Adding upstream version 6.6.15.

Signed-off-by: Daniel Baumann
---
 drivers/gpu/drm/amd/display/dc/link/Makefile | 64 +
 .../amd/display/dc/link/accessories/link_dp_cts.c | 1016 ++++++++
 .../amd/display/dc/link/accessories/link_dp_cts.h | 44 +
 .../display/dc/link/accessories/link_dp_trace.c | 173 ++
 .../display/dc/link/accessories/link_dp_trace.h | 63 +
 .../amd/display/dc/link/accessories/link_fpga.c | 95 +
 .../amd/display/dc/link/accessories/link_fpga.h | 30 +
 .../drm/amd/display/dc/link/hwss/link_hwss_dio.c | 256 ++
 .../drm/amd/display/dc/link/hwss/link_hwss_dio.h | 62 +
 .../link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c | 200 ++
 .../link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h | 37 +
 .../drm/amd/display/dc/link/hwss/link_hwss_dpia.c | 82 +
 .../drm/amd/display/dc/link/hwss/link_hwss_dpia.h | 34 +
 .../amd/display/dc/link/hwss/link_hwss_hpo_dp.c | 216 ++
 .../amd/display/dc/link/hwss/link_hwss_hpo_dp.h | 62 +
 .../hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c | 229 ++
 .../hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h | 33 +
 .../gpu/drm/amd/display/dc/link/link_detection.c | 1444 +++++++++++
 .../gpu/drm/amd/display/dc/link/link_detection.h | 43 +
 drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 2541 ++++++++++++++++++++
 drivers/gpu/drm/amd/display/dc/link/link_dpms.h | 53 +
 drivers/gpu/drm/amd/display/dc/link/link_factory.c | 867 +++++++
 drivers/gpu/drm/amd/display/dc/link/link_factory.h | 31 +
 .../drm/amd/display/dc/link/link_hwss_hpo_frl.c | 62 +
 .../gpu/drm/amd/display/dc/link/link_resource.c | 114 +
 .../gpu/drm/amd/display/dc/link/link_resource.h | 32 +
 .../gpu/drm/amd/display/dc/link/link_validation.c | 368 +++
 .../gpu/drm/amd/display/dc/link/link_validation.h | 39 +
 .../drm/amd/display/dc/link/protocols/link_ddc.c | 605 +++++
 .../drm/amd/display/dc/link/protocols/link_ddc.h | 106 +
 .../display/dc/link/protocols/link_dp_capability.c | 2293 ++++++++++++++++++
 .../display/dc/link/protocols/link_dp_capability.h | 107 +
 .../amd/display/dc/link/protocols/link_dp_dpia.c | 105 +
 .../amd/display/dc/link/protocols/link_dp_dpia.h | 41 +
 .../display/dc/link/protocols/link_dp_dpia_bw.c | 492 ++++
 .../display/dc/link/protocols/link_dp_dpia_bw.h | 102 +
 .../dc/link/protocols/link_dp_irq_handler.c | 490 ++++
 .../dc/link/protocols/link_dp_irq_handler.h | 41 +
 .../amd/display/dc/link/protocols/link_dp_phy.c | 209 ++
 .../amd/display/dc/link/protocols/link_dp_phy.h | 59 +
 .../display/dc/link/protocols/link_dp_training.c | 1732 +++++++++++++
 .../display/dc/link/protocols/link_dp_training.h | 185 ++
 .../dc/link/protocols/link_dp_training_128b_132b.c | 265 ++
 .../dc/link/protocols/link_dp_training_128b_132b.h | 42 +
 .../dc/link/protocols/link_dp_training_8b_10b.c | 420 ++++
 .../dc/link/protocols/link_dp_training_8b_10b.h | 61 +
 .../dc/link/protocols/link_dp_training_auxless.c | 79 +
 .../dc/link/protocols/link_dp_training_auxless.h | 35 +
 .../dc/link/protocols/link_dp_training_dpia.c | 1048 ++++++++
 .../dc/link/protocols/link_dp_training_dpia.h | 41 +
 .../link_dp_training_fixed_vs_pe_retimer.c | 891 +++++++
 .../link_dp_training_fixed_vs_pe_retimer.h | 50 +
 .../drm/amd/display/dc/link/protocols/link_dpcd.c | 249 ++
 .../drm/amd/display/dc/link/protocols/link_dpcd.h | 42 +
 .../dc/link/protocols/link_edp_panel_control.c | 1079 +++++++++
 .../dc/link/protocols/link_edp_panel_control.h | 74 +
 .../drm/amd/display/dc/link/protocols/link_hpd.c | 240 ++
 .../drm/amd/display/dc/link/protocols/link_hpd.h | 54 +
 58 files changed, 19527 insertions(+)
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/Makefile
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_detection.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_detection.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_dpms.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_dpms.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_factory.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_factory.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_resource.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_resource.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_validation.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/link_validation.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c
 create mode 100644 drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h

diff --git a/drivers/gpu/drm/amd/display/dc/link/Makefile b/drivers/gpu/drm/amd/display/dc/link/Makefile
new file mode 100644
index 0000000000..6af8a97d4a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/Makefile
@@ -0,0 +1,64 @@
+#
+# Copyright 2022 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the link sub-component of DAL.
+# It abstracts the control and status of back end pipe such as DIO, HPO, DPIA,
+# PHY, HPD, DDC and etc).
+
+LINK = link_detection.o link_dpms.o link_factory.o link_resource.o \
+link_validation.o
+
+AMD_DAL_LINK = $(addprefix $(AMDDALPATH)/dc/link/, \
+$(LINK))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK)
+###############################################################################
+# accessories
+###############################################################################
+LINK_ACCESSORIES = link_dp_trace.o link_dp_cts.o link_fpga.o
+
+AMD_DAL_LINK_ACCESSORIES = $(addprefix $(AMDDALPATH)/dc/link/accessories/, \
+$(LINK_ACCESSORIES))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK_ACCESSORIES)
+###############################################################################
+# hwss
+###############################################################################
+LINK_HWSS = link_hwss_dio.o link_hwss_dpia.o link_hwss_hpo_dp.o \
+link_hwss_dio_fixed_vs_pe_retimer.o link_hwss_hpo_fixed_vs_pe_retimer_dp.o
+
+AMD_DAL_LINK_HWSS = $(addprefix $(AMDDALPATH)/dc/link/hwss/, \
+$(LINK_HWSS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK_HWSS)
+###############################################################################
+# protocols
+###############################################################################
+LINK_PROTOCOLS = link_hpd.o link_ddc.o link_dpcd.o link_dp_dpia.o \
+link_dp_training.o link_dp_training_8b_10b.o link_dp_training_128b_132b.o \
+link_dp_training_dpia.o link_dp_training_auxless.o \
+link_dp_training_fixed_vs_pe_retimer.o link_dp_phy.o link_dp_capability.o \
+link_edp_panel_control.o link_dp_irq_handler.o link_dp_dpia_bw.o
+
+AMD_DAL_LINK_PROTOCOLS = $(addprefix $(AMDDALPATH)/dc/link/protocols/, \
+$(LINK_PROTOCOLS))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_LINK_PROTOCOLS)
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
new file mode 100644
index 0000000000..fe4282771c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c
@@ -0,0 +1,1016 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + * Authors: AMD + * + */ +#include "link_dp_cts.h" +#include "link/link_resource.h" +#include "link/protocols/link_dpcd.h" +#include "link/protocols/link_dp_training.h" +#include "link/protocols/link_dp_phy.h" +#include "link/protocols/link_dp_training_fixed_vs_pe_retimer.h" +#include "link/protocols/link_dp_capability.h" +#include "link/link_dpms.h" +#include "resource.h" +#include "dm_helpers.h" +#include "dc_dmub_srv.h" +#include "dce/dmub_hw_lock_mgr.h" + +#define DC_LOGGER \ + link->ctx->logger + +static enum dc_link_rate get_link_rate_from_test_link_rate(uint8_t test_rate) +{ + switch (test_rate) { + case DP_TEST_LINK_RATE_RBR: + return LINK_RATE_LOW; + case DP_TEST_LINK_RATE_HBR: + return LINK_RATE_HIGH; + case DP_TEST_LINK_RATE_HBR2: + return LINK_RATE_HIGH2; + case DP_TEST_LINK_RATE_HBR3: + return LINK_RATE_HIGH3; + case DP_TEST_LINK_RATE_UHBR10: + return LINK_RATE_UHBR10; + case DP_TEST_LINK_RATE_UHBR20: + return LINK_RATE_UHBR20; + case DP_TEST_LINK_RATE_UHBR13_5: + return LINK_RATE_UHBR13_5; + default: + return LINK_RATE_UNKNOWN; + } +} + +static bool is_dp_phy_sqaure_pattern(enum dp_test_pattern test_pattern) +{ + return (DP_TEST_PATTERN_SQUARE_BEGIN <= test_pattern && + test_pattern <= DP_TEST_PATTERN_SQUARE_END); +} + +static bool is_dp_phy_pattern(enum dp_test_pattern test_pattern) +{ + if ((DP_TEST_PATTERN_PHY_PATTERN_BEGIN <= test_pattern && + test_pattern <= DP_TEST_PATTERN_PHY_PATTERN_END) || + test_pattern == DP_TEST_PATTERN_VIDEO_MODE) + return true; + else + return false; +} + +static void dp_retrain_link_dp_test(struct dc_link *link, + struct dc_link_settings *link_setting, + bool skip_video_pattern) +{ + struct pipe_ctx *pipes[MAX_PIPES]; + struct dc_state *state = link->dc->current_state; + uint8_t count; + int i; + + udelay(100); + + link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + + for (i = 0; i < count; i++) { + link_set_dpms_off(pipes[i]); + pipes[i]->link_config.dp_link_settings = *link_setting; + update_dp_encoder_resources_for_test_harness( + link->dc, + state, + pipes[i]); + } + + for (i = count-1; i >= 0; i--) + link_set_dpms_on(state, pipes[i]); +} + +static void dp_test_send_link_training(struct dc_link *link) +{ + struct dc_link_settings link_settings = {0}; + uint8_t test_rate = 0; + + core_link_read_dpcd( + link, + DP_TEST_LANE_COUNT, + (unsigned char *)(&link_settings.lane_count), + 1); + core_link_read_dpcd( + link, + DP_TEST_LINK_RATE, + &test_rate, + 1); + link_settings.link_rate = get_link_rate_from_test_link_rate(test_rate); + + /* Set preferred link settings */ + link->verified_link_cap.lane_count = link_settings.lane_count; + link->verified_link_cap.link_rate = link_settings.link_rate; + + dp_retrain_link_dp_test(link, &link_settings, false); +} + +static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video) +{ + union audio_test_mode dpcd_test_mode = {0}; + struct audio_test_pattern_type dpcd_pattern_type = {0}; + union audio_test_pattern_period dpcd_pattern_period[AUDIO_CHANNELS_COUNT] = {0}; + enum dp_test_pattern test_pattern = DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + + struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; + struct pipe_ctx *pipe_ctx = &pipes[0]; + unsigned int channel_count; + unsigned int channel = 0; + unsigned int modes = 0; + unsigned int sampling_rate_in_hz = 0; + + // get audio test mode and test pattern parameters + core_link_read_dpcd( + link, + DP_TEST_AUDIO_MODE, + &dpcd_test_mode.raw, + sizeof(dpcd_test_mode)); + + core_link_read_dpcd( + link, + 
DP_TEST_AUDIO_PATTERN_TYPE, + &dpcd_pattern_type.value, + sizeof(dpcd_pattern_type)); + + channel_count = min(dpcd_test_mode.bits.channel_count + 1, AUDIO_CHANNELS_COUNT); + + // read pattern periods for requested channels when sawTooth pattern is requested + if (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH || + dpcd_pattern_type.value == AUDIO_TEST_PATTERN_OPERATOR_DEFINED) { + + test_pattern = (dpcd_pattern_type.value == AUDIO_TEST_PATTERN_SAWTOOTH) ? + DP_TEST_PATTERN_AUDIO_SAWTOOTH : DP_TEST_PATTERN_AUDIO_OPERATOR_DEFINED; + // read period for each channel + for (channel = 0; channel < channel_count; channel++) { + core_link_read_dpcd( + link, + DP_TEST_AUDIO_PERIOD_CH1 + channel, + &dpcd_pattern_period[channel].raw, + sizeof(dpcd_pattern_period[channel])); + } + } + + // translate sampling rate + switch (dpcd_test_mode.bits.sampling_rate) { + case AUDIO_SAMPLING_RATE_32KHZ: + sampling_rate_in_hz = 32000; + break; + case AUDIO_SAMPLING_RATE_44_1KHZ: + sampling_rate_in_hz = 44100; + break; + case AUDIO_SAMPLING_RATE_48KHZ: + sampling_rate_in_hz = 48000; + break; + case AUDIO_SAMPLING_RATE_88_2KHZ: + sampling_rate_in_hz = 88200; + break; + case AUDIO_SAMPLING_RATE_96KHZ: + sampling_rate_in_hz = 96000; + break; + case AUDIO_SAMPLING_RATE_176_4KHZ: + sampling_rate_in_hz = 176400; + break; + case AUDIO_SAMPLING_RATE_192KHZ: + sampling_rate_in_hz = 192000; + break; + default: + sampling_rate_in_hz = 0; + break; + } + + link->audio_test_data.flags.test_requested = 1; + link->audio_test_data.flags.disable_video = disable_video; + link->audio_test_data.sampling_rate = sampling_rate_in_hz; + link->audio_test_data.channel_count = channel_count; + link->audio_test_data.pattern_type = test_pattern; + + if (test_pattern == DP_TEST_PATTERN_AUDIO_SAWTOOTH) { + for (modes = 0; modes < pipe_ctx->stream->audio_info.mode_count; modes++) { + link->audio_test_data.pattern_period[modes] = dpcd_pattern_period[modes].bits.pattern_period; + } + } +} + +/* TODO Raven hbr2 compliance eye output is unstable + * (toggling on and off) with debugger break + * This caueses intermittent PHY automation failure + * Need to look into the root cause */ +static void dp_test_send_phy_test_pattern(struct dc_link *link) +{ + union phy_test_pattern dpcd_test_pattern; + union lane_adjust dpcd_lane_adjustment[2]; + unsigned char dpcd_post_cursor_2_adjustment = 0; + unsigned char test_pattern_buffer[ + (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 - + DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0}; + unsigned int test_pattern_size = 0; + enum dp_test_pattern test_pattern; + union lane_adjust dpcd_lane_adjust; + unsigned int lane; + struct link_training_settings link_training_settings; + unsigned char no_preshoot = 0; + unsigned char no_deemphasis = 0; + + dpcd_test_pattern.raw = 0; + memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment)); + memset(&link_training_settings, 0, sizeof(link_training_settings)); + + /* get phy test pattern and pattern parameters from DP receiver */ + core_link_read_dpcd( + link, + DP_PHY_TEST_PATTERN, + &dpcd_test_pattern.raw, + sizeof(dpcd_test_pattern)); + core_link_read_dpcd( + link, + DP_ADJUST_REQUEST_LANE0_1, + &dpcd_lane_adjustment[0].raw, + sizeof(dpcd_lane_adjustment)); + + /* prepare link training settings */ + link_training_settings.link_settings = link->cur_link_settings; + + link_training_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link->cur_link_settings); + + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link_training_settings.lttpr_mode == 
LTTPR_MODE_TRANSPARENT) + dp_fixed_vs_pe_read_lane_adjust( + link, + link_training_settings.dpcd_lane_settings); + + /*get post cursor 2 parameters + * For DP 1.1a or eariler, this DPCD register's value is 0 + * For DP 1.2 or later: + * Bits 1:0 = POST_CURSOR2_LANE0; Bits 3:2 = POST_CURSOR2_LANE1 + * Bits 5:4 = POST_CURSOR2_LANE2; Bits 7:6 = POST_CURSOR2_LANE3 + */ + core_link_read_dpcd( + link, + DP_ADJUST_REQUEST_POST_CURSOR2, + &dpcd_post_cursor_2_adjustment, + sizeof(dpcd_post_cursor_2_adjustment)); + + /* translate request */ + switch (dpcd_test_pattern.bits.PATTERN) { + case PHY_TEST_PATTERN_D10_2: + test_pattern = DP_TEST_PATTERN_D102; + break; + case PHY_TEST_PATTERN_SYMBOL_ERROR: + test_pattern = DP_TEST_PATTERN_SYMBOL_ERROR; + break; + case PHY_TEST_PATTERN_PRBS7: + test_pattern = DP_TEST_PATTERN_PRBS7; + break; + case PHY_TEST_PATTERN_80BIT_CUSTOM: + test_pattern = DP_TEST_PATTERN_80BIT_CUSTOM; + break; + case PHY_TEST_PATTERN_CP2520_1: + /* CP2520 pattern is unstable, temporarily use TPS4 instead */ + test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? + DP_TEST_PATTERN_TRAINING_PATTERN4 : + DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; + break; + case PHY_TEST_PATTERN_CP2520_2: + /* CP2520 pattern is unstable, temporarily use TPS4 instead */ + test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ? + DP_TEST_PATTERN_TRAINING_PATTERN4 : + DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE; + break; + case PHY_TEST_PATTERN_CP2520_3: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; + break; + case PHY_TEST_PATTERN_128b_132b_TPS1: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS1; + break; + case PHY_TEST_PATTERN_128b_132b_TPS2: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS2; + break; + case PHY_TEST_PATTERN_PRBS9: + test_pattern = DP_TEST_PATTERN_PRBS9; + break; + case PHY_TEST_PATTERN_PRBS11: + test_pattern = DP_TEST_PATTERN_PRBS11; + break; + case PHY_TEST_PATTERN_PRBS15: + test_pattern = DP_TEST_PATTERN_PRBS15; + break; + case PHY_TEST_PATTERN_PRBS23: + test_pattern = DP_TEST_PATTERN_PRBS23; + break; + case PHY_TEST_PATTERN_PRBS31: + test_pattern = DP_TEST_PATTERN_PRBS31; + break; + case PHY_TEST_PATTERN_264BIT_CUSTOM: + test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM; + break; + case PHY_TEST_PATTERN_SQUARE: + test_pattern = DP_TEST_PATTERN_SQUARE; + break; + case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: + test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED; + no_preshoot = 1; + break; + case PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: + test_pattern = DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED; + no_deemphasis = 1; + break; + case PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED: + test_pattern = DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED; + no_preshoot = 1; + no_deemphasis = 1; + break; + default: + test_pattern = DP_TEST_PATTERN_VIDEO_MODE; + break; + } + + if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) { + test_pattern_size = (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - + DP_TEST_80BIT_CUSTOM_PATTERN_7_0) + 1; + core_link_read_dpcd( + link, + DP_TEST_80BIT_CUSTOM_PATTERN_7_0, + test_pattern_buffer, + test_pattern_size); + } + + if (is_dp_phy_sqaure_pattern(test_pattern)) { + test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) + core_link_read_dpcd( + link, + DP_PHY_SQUARE_PATTERN, + test_pattern_buffer, + test_pattern_size); + } + + if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) { + test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256- + DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1; + core_link_read_dpcd( + link, + 
DP_TEST_264BIT_CUSTOM_PATTERN_7_0, + test_pattern_buffer, + test_pattern_size); + } + + for (lane = 0; lane < + (unsigned int)(link->cur_link_settings.lane_count); + lane++) { + dpcd_lane_adjust.raw = + dp_get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane); + if (link_dp_get_encoding_format(&link->cur_link_settings) == + DP_8b_10b_ENCODING) { + link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING = + (enum dc_voltage_swing) + (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE); + link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS = + (enum dc_pre_emphasis) + (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE); + link_training_settings.hw_lane_settings[lane].POST_CURSOR2 = + (enum dc_post_cursor2) + ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); + } else if (link_dp_get_encoding_format(&link->cur_link_settings) == + DP_128b_132b_ENCODING) { + link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.level = + dpcd_lane_adjust.tx_ffe.PRESET_VALUE; + link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_preshoot = no_preshoot; + link_training_settings.hw_lane_settings[lane].FFE_PRESET.settings.no_deemphasis = no_deemphasis; + } + } + + dp_hw_to_dpcd_lane_settings(&link_training_settings, + link_training_settings.hw_lane_settings, + link_training_settings.dpcd_lane_settings); + /*Usage: Measure DP physical lane signal + * by DP SI test equipment automatically. + * PHY test pattern request is generated by equipment via HPD interrupt. + * HPD needs to be active all the time. HPD should be active + * all the time. Do not touch it. + * forward request to DS + */ + dp_set_test_pattern( + link, + test_pattern, + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED, + &link_training_settings, + test_pattern_buffer, + test_pattern_size); +} + +static void set_crtc_test_pattern(struct dc_link *link, + struct pipe_ctx *pipe_ctx, + enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space) +{ + enum controller_dp_test_pattern controller_test_pattern; + enum dc_color_depth color_depth = pipe_ctx-> + stream->timing.display_color_depth; + struct bit_depth_reduction_params params; + struct output_pixel_processor *opp = pipe_ctx->stream_res.opp; + struct pipe_ctx *odm_pipe; + int odm_cnt = 1; + int h_active = pipe_ctx->stream->timing.h_addressable + + pipe_ctx->stream->timing.h_border_left + + pipe_ctx->stream->timing.h_border_right; + int v_active = pipe_ctx->stream->timing.v_addressable + + pipe_ctx->stream->timing.v_border_bottom + + pipe_ctx->stream->timing.v_border_top; + int odm_slice_width, last_odm_slice_width, offset = 0; + + memset(¶ms, 0, sizeof(params)); + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + odm_cnt++; + + odm_slice_width = h_active / odm_cnt; + last_odm_slice_width = h_active - odm_slice_width * (odm_cnt - 1); + + switch (test_pattern) { + case DP_TEST_PATTERN_COLOR_SQUARES: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_COLORSQUARES; + break; + case DP_TEST_PATTERN_COLOR_SQUARES_CEA: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA; + break; + case DP_TEST_PATTERN_VERTICAL_BARS: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_VERTICALBARS; + break; + case DP_TEST_PATTERN_HORIZONTAL_BARS: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS; + break; + case DP_TEST_PATTERN_COLOR_RAMP: + controller_test_pattern = + CONTROLLER_DP_TEST_PATTERN_COLORRAMP; + break; + default: + controller_test_pattern = + 
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE; + break; + } + + switch (test_pattern) { + case DP_TEST_PATTERN_COLOR_SQUARES: + case DP_TEST_PATTERN_COLOR_SQUARES_CEA: + case DP_TEST_PATTERN_VERTICAL_BARS: + case DP_TEST_PATTERN_HORIZONTAL_BARS: + case DP_TEST_PATTERN_COLOR_RAMP: + { + /* disable bit depth reduction */ + pipe_ctx->stream->bit_depth_params = params; + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) { + opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); + pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, + controller_test_pattern, color_depth); + } else if (link->dc->hwss.set_disp_pattern_generator) { + enum controller_dp_color_space controller_color_space; + struct output_pixel_processor *odm_opp; + + switch (test_pattern_color_space) { + case DP_TEST_PATTERN_COLOR_SPACE_RGB: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_RGB; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR601; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_YCBCR709; + break; + case DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED: + default: + controller_color_space = CONTROLLER_DP_COLOR_SPACE_UDEFINED; + DC_LOG_ERROR("%s: Color space must be defined for test pattern", __func__); + ASSERT(0); + break; + } + + odm_pipe = pipe_ctx; + while (odm_pipe->next_odm_pipe) { + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); + link->dc->hwss.set_disp_pattern_generator(link->dc, + odm_pipe, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + odm_slice_width, + v_active, + offset); + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; + } + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); + link->dc->hwss.set_disp_pattern_generator(link->dc, + odm_pipe, + controller_test_pattern, + controller_color_space, + color_depth, + NULL, + last_odm_slice_width, + v_active, + offset); + } + } + break; + case DP_TEST_PATTERN_VIDEO_MODE: + { + /* restore bitdepth reduction */ + resource_build_bit_depth_reduction_params(pipe_ctx->stream, ¶ms); + pipe_ctx->stream->bit_depth_params = params; + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) { + opp->funcs->opp_program_bit_depth_reduction(opp, ¶ms); + pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + color_depth); + } else if (link->dc->hwss.set_disp_pattern_generator) { + struct output_pixel_processor *odm_opp; + + odm_pipe = pipe_ctx; + while (odm_pipe->next_odm_pipe) { + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); + link->dc->hwss.set_disp_pattern_generator(link->dc, + odm_pipe, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, + color_depth, + NULL, + odm_slice_width, + v_active, + offset); + offset += odm_slice_width; + odm_pipe = odm_pipe->next_odm_pipe; + } + odm_opp = odm_pipe->stream_res.opp; + odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, ¶ms); + link->dc->hwss.set_disp_pattern_generator(link->dc, + odm_pipe, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + CONTROLLER_DP_COLOR_SPACE_UDEFINED, + color_depth, + NULL, + last_odm_slice_width, + v_active, + offset); + } + } + break; + + default: + break; + } +} + +void dp_handle_automated_test(struct dc_link *link) +{ + union test_request test_request; + union test_response test_response; 
+ + memset(&test_request, 0, sizeof(test_request)); + memset(&test_response, 0, sizeof(test_response)); + + core_link_read_dpcd( + link, + DP_TEST_REQUEST, + &test_request.raw, + sizeof(union test_request)); + if (test_request.bits.LINK_TRAINING) { + /* ACK first to let DP RX test box monitor LT sequence */ + test_response.bits.ACK = 1; + core_link_write_dpcd( + link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); + dp_test_send_link_training(link); + /* no acknowledge request is needed again */ + test_response.bits.ACK = 0; + } + if (test_request.bits.LINK_TEST_PATTRN) { + union test_misc dpcd_test_params; + union link_test_pattern dpcd_test_pattern; + + memset(&dpcd_test_pattern, 0, sizeof(dpcd_test_pattern)); + memset(&dpcd_test_params, 0, sizeof(dpcd_test_params)); + + /* get link test pattern and pattern parameters */ + core_link_read_dpcd( + link, + DP_TEST_PATTERN, + &dpcd_test_pattern.raw, + sizeof(dpcd_test_pattern)); + core_link_read_dpcd( + link, + DP_TEST_MISC0, + &dpcd_test_params.raw, + sizeof(dpcd_test_params)); + test_response.bits.ACK = dm_helpers_dp_handle_test_pattern_request(link->ctx, link, + dpcd_test_pattern, dpcd_test_params) ? 1 : 0; + } + + if (test_request.bits.AUDIO_TEST_PATTERN) { + dp_test_get_audio_test_data(link, test_request.bits.TEST_AUDIO_DISABLED_VIDEO); + test_response.bits.ACK = 1; + } + + if (test_request.bits.PHY_TEST_PATTERN) { + dp_test_send_phy_test_pattern(link); + test_response.bits.ACK = 1; + } + + /* send request acknowledgment */ + if (test_response.bits.ACK) + core_link_write_dpcd( + link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); +} + +bool dp_set_test_pattern( + struct dc_link *link, + enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, + const struct link_training_settings *p_link_settings, + const unsigned char *p_custom_pattern, + unsigned int cust_pattern_size) +{ + struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; + struct pipe_ctx *pipe_ctx = NULL; + unsigned int lane; + unsigned int i; + unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0}; + union dpcd_training_pattern training_pattern; + enum dpcd_phy_test_patterns pattern; + + memset(&training_pattern, 0, sizeof(training_pattern)); + + for (i = 0; i < MAX_PIPES; i++) { + if (pipes[i].stream == NULL) + continue; + + if (resource_is_pipe_type(&pipes[i], OTG_MASTER) && + pipes[i].stream->link == link) { + pipe_ctx = &pipes[i]; + break; + } + } + + if (pipe_ctx == NULL) + return false; + + /* Reset CRTC Test Pattern if it is currently running and request is VideoMode */ + if (link->test_pattern_enabled && test_pattern == + DP_TEST_PATTERN_VIDEO_MODE) { + /* Set CRTC Test Pattern */ + set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); + dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, + (uint8_t *)p_custom_pattern, + (uint32_t)cust_pattern_size); + + /* Unblank Stream */ + link->dc->hwss.unblank_stream( + pipe_ctx, + &link->verified_link_cap); + /* TODO:m_pHwss->MuteAudioEndpoint + * (pPathMode->pDisplayPath, false); + */ + + /* Reset Test Pattern state */ + link->test_pattern_enabled = false; + link->current_test_pattern = test_pattern; + + return true; + } + + /* Check for PHY Test Patterns */ + if (is_dp_phy_pattern(test_pattern)) { + /* Set DPCD Lane Settings before running test pattern */ + if (p_link_settings != NULL) { + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + p_link_settings->lttpr_mode == 
LTTPR_MODE_TRANSPARENT) { + dp_fixed_vs_pe_set_retimer_lane_settings( + link, + p_link_settings->dpcd_lane_settings, + p_link_settings->link_settings.lane_count); + } else { + dp_set_hw_lane_settings(link, &pipe_ctx->link_res, p_link_settings, DPRX); + } + dpcd_set_lane_settings(link, p_link_settings, DPRX); + } + + /* Blank stream if running test pattern */ + if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { + /*TODO: + * m_pHwss-> + * MuteAudioEndpoint(pPathMode->pDisplayPath, true); + */ + /* Blank stream */ + link->dc->hwss.blank_stream(pipe_ctx); + } + + dp_set_hw_test_pattern(link, &pipe_ctx->link_res, test_pattern, + (uint8_t *)p_custom_pattern, + (uint32_t)cust_pattern_size); + + if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { + /* Set Test Pattern state */ + link->test_pattern_enabled = true; + link->current_test_pattern = test_pattern; + if (p_link_settings != NULL) + dpcd_set_link_settings(link, + p_link_settings); + } + + switch (test_pattern) { + case DP_TEST_PATTERN_VIDEO_MODE: + pattern = PHY_TEST_PATTERN_NONE; + break; + case DP_TEST_PATTERN_D102: + pattern = PHY_TEST_PATTERN_D10_2; + break; + case DP_TEST_PATTERN_SYMBOL_ERROR: + pattern = PHY_TEST_PATTERN_SYMBOL_ERROR; + break; + case DP_TEST_PATTERN_PRBS7: + pattern = PHY_TEST_PATTERN_PRBS7; + break; + case DP_TEST_PATTERN_80BIT_CUSTOM: + pattern = PHY_TEST_PATTERN_80BIT_CUSTOM; + break; + case DP_TEST_PATTERN_CP2520_1: + pattern = PHY_TEST_PATTERN_CP2520_1; + break; + case DP_TEST_PATTERN_CP2520_2: + pattern = PHY_TEST_PATTERN_CP2520_2; + break; + case DP_TEST_PATTERN_CP2520_3: + pattern = PHY_TEST_PATTERN_CP2520_3; + break; + case DP_TEST_PATTERN_128b_132b_TPS1: + pattern = PHY_TEST_PATTERN_128b_132b_TPS1; + break; + case DP_TEST_PATTERN_128b_132b_TPS2: + pattern = PHY_TEST_PATTERN_128b_132b_TPS2; + break; + case DP_TEST_PATTERN_PRBS9: + pattern = PHY_TEST_PATTERN_PRBS9; + break; + case DP_TEST_PATTERN_PRBS11: + pattern = PHY_TEST_PATTERN_PRBS11; + break; + case DP_TEST_PATTERN_PRBS15: + pattern = PHY_TEST_PATTERN_PRBS15; + break; + case DP_TEST_PATTERN_PRBS23: + pattern = PHY_TEST_PATTERN_PRBS23; + break; + case DP_TEST_PATTERN_PRBS31: + pattern = PHY_TEST_PATTERN_PRBS31; + break; + case DP_TEST_PATTERN_264BIT_CUSTOM: + pattern = PHY_TEST_PATTERN_264BIT_CUSTOM; + break; + case DP_TEST_PATTERN_SQUARE: + pattern = PHY_TEST_PATTERN_SQUARE; + break; + case DP_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED: + pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DISABLED; + break; + case DP_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED: + pattern = PHY_TEST_PATTERN_SQUARE_DEEMPHASIS_DISABLED; + break; + case DP_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED: + pattern = PHY_TEST_PATTERN_SQUARE_PRESHOOT_DEEMPHASIS_DISABLED; + break; + default: + return false; + } + + if (test_pattern == DP_TEST_PATTERN_VIDEO_MODE + /*TODO:&& !pPathMode->pDisplayPath->IsTargetPoweredOn()*/) + return false; + + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + if (is_dp_phy_sqaure_pattern(test_pattern)) + core_link_write_dpcd(link, + DP_LINK_SQUARE_PATTERN, + p_custom_pattern, + 1); + + /* tell receiver that we are sending qualification + * pattern DP 1.2 or later - DP receiver's link quality + * pattern is set using DPCD LINK_QUAL_LANEx_SET + * register (0x10B~0x10E)\ + */ + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) + link_qual_pattern[lane] = + (unsigned char)(pattern); + + core_link_write_dpcd(link, + DP_LINK_QUAL_LANE0_SET, + link_qual_pattern, + sizeof(link_qual_pattern)); + } else if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_10 || + 
link->dpcd_caps.dpcd_rev.raw == 0) { + /* tell receiver that we are sending qualification + * pattern DP 1.1a or earlier - DP receiver's link + * quality pattern is set using + * DPCD TRAINING_PATTERN_SET -> LINK_QUAL_PATTERN_SET + * register (0x102). We will use v_1.3 when we are + * setting test pattern for DP 1.1. + */ + core_link_read_dpcd(link, DP_TRAINING_PATTERN_SET, + &training_pattern.raw, + sizeof(training_pattern)); + training_pattern.v1_3.LINK_QUAL_PATTERN_SET = pattern; + core_link_write_dpcd(link, DP_TRAINING_PATTERN_SET, + &training_pattern.raw, + sizeof(training_pattern)); + } + } else { + enum dc_color_space color_space = COLOR_SPACE_UNKNOWN; + + switch (test_pattern_color_space) { + case DP_TEST_PATTERN_COLOR_SPACE_RGB: + color_space = COLOR_SPACE_SRGB; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_SRGB_LIMITED; + break; + + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR601: + color_space = COLOR_SPACE_YCBCR601; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR601_LIMITED; + break; + case DP_TEST_PATTERN_COLOR_SPACE_YCBCR709: + color_space = COLOR_SPACE_YCBCR709; + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + color_space = COLOR_SPACE_YCBCR709_LIMITED; + break; + default: + break; + } + + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable) { + if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { + union dmub_hw_lock_flags hw_locks = { 0 }; + struct dmub_hw_lock_inst_flags inst_flags = { 0 }; + + hw_locks.bits.lock_dig = 1; + inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst; + + dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv, + true, + &hw_locks, + &inst_flags); + } else + pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable( + pipe_ctx->stream_res.tg); + } + + pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg); + /* update MSA to requested color space */ + pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc, + &pipe_ctx->stream->timing, + color_space, + pipe_ctx->stream->use_vsc_sdp_for_colorimetry, + link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + + if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) { + if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA) + pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range + else + pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7); + resource_build_info_frame(pipe_ctx); + link->dc->hwss.update_info_frame(pipe_ctx); + } + + /* CRTC Patterns */ + set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space); + pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VACTIVE); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VBLANK); + pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, + CRTC_STATE_VACTIVE); + + if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable) { + if (pipe_ctx->stream && should_use_dmub_lock(pipe_ctx->stream->link)) { + union dmub_hw_lock_flags hw_locks = { 0 }; + struct dmub_hw_lock_inst_flags inst_flags = { 0 }; + + hw_locks.bits.lock_dig = 1; + inst_flags.dig_inst = pipe_ctx->stream_res.tg->inst; + + dmub_hw_lock_mgr_cmd(link->ctx->dmub_srv, + false, + &hw_locks, + &inst_flags); + } else + pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable( + pipe_ctx->stream_res.tg); + } + + /* Set Test 
Pattern state */ + link->test_pattern_enabled = true; + link->current_test_pattern = test_pattern; + } + + return true; +} + +void dp_set_preferred_link_settings(struct dc *dc, + struct dc_link_settings *link_setting, + struct dc_link *link) +{ + int i; + struct pipe_ctx *pipe; + struct dc_stream_state *link_stream; + struct dc_link_settings store_settings = *link_setting; + + link->preferred_link_setting = store_settings; + + /* Retrain with preferred link settings only relevant for + * DP signal type + * Check for non-DP signal or if passive dongle present + */ + if (!dc_is_dp_signal(link->connector_signal) || + link->dongle_max_pix_clk > 0) + return; + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->stream && pipe->stream->link) { + if (pipe->stream->link == link) { + link_stream = pipe->stream; + break; + } + } + } + + /* Stream not found */ + if (i == MAX_PIPES) + return; + + /* Cannot retrain link if backend is off */ + if (link_stream->dpms_off) + return; + + if (link_decide_link_settings(link_stream, &store_settings)) + dp_retrain_link_dp_test(link, &store_settings, false); +} + +void dp_set_preferred_training_settings(struct dc *dc, + struct dc_link_settings *link_setting, + struct dc_link_training_overrides *lt_overrides, + struct dc_link *link, + bool skip_immediate_retrain) +{ + if (lt_overrides != NULL) + link->preferred_training_settings = *lt_overrides; + else + memset(&link->preferred_training_settings, 0, sizeof(link->preferred_training_settings)); + + if (link_setting != NULL) { + link->preferred_link_setting = *link_setting; + } else { + link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN; + link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN; + } + + if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + link->type == dc_connection_mst_branch) + dm_helpers_dp_mst_update_branch_bandwidth(dc->ctx, link); + + /* Retrain now, or wait until next stream update to apply */ + if (skip_immediate_retrain == false) + dp_set_preferred_link_settings(dc, &link->preferred_link_setting, link); +} diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h new file mode 100644 index 0000000000..eae23ea7f6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.h @@ -0,0 +1,44 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_DP_CTS_H__ +#define __LINK_DP_CTS_H__ +#include "link.h" +void dp_handle_automated_test(struct dc_link *link); +bool dp_set_test_pattern( + struct dc_link *link, + enum dp_test_pattern test_pattern, + enum dp_test_pattern_color_space test_pattern_color_space, + const struct link_training_settings *p_link_settings, + const unsigned char *p_custom_pattern, + unsigned int cust_pattern_size); +void dp_set_preferred_link_settings(struct dc *dc, + struct dc_link_settings *link_setting, + struct dc_link *link); +void dp_set_preferred_training_settings(struct dc *dc, + struct dc_link_settings *link_setting, + struct dc_link_training_overrides *lt_overrides, + struct dc_link *link, + bool skip_immediate_retrain); +#endif /* __LINK_DP_CTS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c new file mode 100644 index 0000000000..fbcd8fb58e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.c @@ -0,0 +1,173 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "link_dp_trace.h" +#include "link/protocols/link_dpcd.h" +#include "link.h" + +void dp_trace_init(struct dc_link *link) +{ + memset(&link->dp_trace, 0, sizeof(link->dp_trace)); + link->dp_trace.is_initialized = true; +} + +void dp_trace_reset(struct dc_link *link) +{ + memset(&link->dp_trace, 0, sizeof(link->dp_trace)); +} + +bool dp_trace_is_initialized(struct dc_link *link) +{ + return link->dp_trace.is_initialized; +} + +void dp_trace_detect_lt_init(struct dc_link *link) +{ + memset(&link->dp_trace.detect_lt_trace, 0, sizeof(link->dp_trace.detect_lt_trace)); +} + +void dp_trace_commit_lt_init(struct dc_link *link) +{ + memset(&link->dp_trace.commit_lt_trace, 0, sizeof(link->dp_trace.commit_lt_trace)); +} + +void dp_trace_link_loss_increment(struct dc_link *link) +{ + link->dp_trace.link_loss_count++; +} + +void dp_trace_lt_fail_count_update(struct dc_link *link, + unsigned int fail_count, + bool in_detection) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.counts.fail = fail_count; + else + link->dp_trace.commit_lt_trace.counts.fail = fail_count; +} + +void dp_trace_lt_total_count_increment(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.counts.total++; + else + link->dp_trace.commit_lt_trace.counts.total++; +} + +void dp_trace_set_is_logged_flag(struct dc_link *link, + bool in_detection, + bool is_logged) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.is_logged = is_logged; + else + link->dp_trace.commit_lt_trace.is_logged = is_logged; +} + +bool dp_trace_is_logged(struct dc_link *link, bool in_detection) +{ + if (in_detection) + return link->dp_trace.detect_lt_trace.is_logged; + else + return link->dp_trace.commit_lt_trace.is_logged; +} + +void dp_trace_lt_result_update(struct dc_link *link, + enum link_training_result result, + bool in_detection) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.result = result; + else + link->dp_trace.commit_lt_trace.result = result; +} + +void dp_trace_set_lt_start_timestamp(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.timestamps.start = dm_get_timestamp(link->dc->ctx); + else + link->dp_trace.commit_lt_trace.timestamps.start = dm_get_timestamp(link->dc->ctx); +} + +void dp_trace_set_lt_end_timestamp(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + link->dp_trace.detect_lt_trace.timestamps.end = dm_get_timestamp(link->dc->ctx); + else + link->dp_trace.commit_lt_trace.timestamps.end = dm_get_timestamp(link->dc->ctx); +} + +unsigned long long dp_trace_get_lt_end_timestamp(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + return link->dp_trace.detect_lt_trace.timestamps.end; + else + return link->dp_trace.commit_lt_trace.timestamps.end; +} + +const struct dp_trace_lt_counts *dp_trace_get_lt_counts(struct dc_link *link, + bool in_detection) +{ + if (in_detection) + return &link->dp_trace.detect_lt_trace.counts; + else + return &link->dp_trace.commit_lt_trace.counts; +} + +unsigned int dp_trace_get_link_loss_count(struct dc_link *link) +{ + return link->dp_trace.link_loss_count; +} + +void dp_trace_set_edp_power_timestamp(struct dc_link *link, + bool power_up) +{ + if (!power_up) + /*save driver power off time stamp*/ + link->dp_trace.edp_trace_power_timestamps.poweroff = dm_get_timestamp(link->dc->ctx); + else + link->dp_trace.edp_trace_power_timestamps.poweron = dm_get_timestamp(link->dc->ctx); +} + +uint64_t dp_trace_get_edp_poweron_timestamp(struct 
dc_link *link) +{ + return link->dp_trace.edp_trace_power_timestamps.poweron; +} + +uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link) +{ + return link->dp_trace.edp_trace_power_timestamps.poweroff; +} + +void dp_trace_source_sequence(struct dc_link *link, uint8_t dp_test_mode) +{ + if (link != NULL && link->dc->debug.enable_driver_sequence_debug) + core_link_write_dpcd(link, DP_SOURCE_SEQUENCE, + &dp_test_mode, sizeof(dp_test_mode)); +} diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h new file mode 100644 index 0000000000..ab437a0c91 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_trace.h @@ -0,0 +1,63 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_DP_TRACE_H__ +#define __LINK_DP_TRACE_H__ +#include "link.h" + +void dp_trace_init(struct dc_link *link); +void dp_trace_reset(struct dc_link *link); +bool dp_trace_is_initialized(struct dc_link *link); +void dp_trace_detect_lt_init(struct dc_link *link); +void dp_trace_commit_lt_init(struct dc_link *link); +void dp_trace_link_loss_increment(struct dc_link *link); +void dp_trace_lt_fail_count_update(struct dc_link *link, + unsigned int fail_count, + bool in_detection); +void dp_trace_lt_total_count_increment(struct dc_link *link, + bool in_detection); +void dp_trace_set_is_logged_flag(struct dc_link *link, + bool in_detection, + bool is_logged); +bool dp_trace_is_logged(struct dc_link *link, + bool in_detection); +void dp_trace_lt_result_update(struct dc_link *link, + enum link_training_result result, + bool in_detection); +void dp_trace_set_lt_start_timestamp(struct dc_link *link, + bool in_detection); +void dp_trace_set_lt_end_timestamp(struct dc_link *link, + bool in_detection); +unsigned long long dp_trace_get_lt_end_timestamp(struct dc_link *link, + bool in_detection); +const struct dp_trace_lt_counts *dp_trace_get_lt_counts(struct dc_link *link, + bool in_detection); +unsigned int dp_trace_get_link_loss_count(struct dc_link *link); +void dp_trace_set_edp_power_timestamp(struct dc_link *link, + bool power_up); +uint64_t dp_trace_get_edp_poweron_timestamp(struct dc_link *link); +uint64_t dp_trace_get_edp_poweroff_timestamp(struct dc_link *link); +void dp_trace_source_sequence(struct dc_link *link, uint8_t dp_test_mode); + +#endif /* __LINK_DP_TRACE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c new file mode 100644 index 0000000000..d3cc604eed --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.c @@ -0,0 +1,95 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "link_fpga.h" +#include "link/link_dpms.h" +#include "dm_helpers.h" +#include "link_hwss.h" +#include "dccg.h" +#include "resource.h" + +#define DC_LOGGER_INIT(logger) + +void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct link_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp; + uint8_t req_slot_count = 0; + uint8_t vc_id = 1; /// VC ID always 1 for SST + struct dc_link_settings link_settings = pipe_ctx->link_config.dp_link_settings; + const struct link_hwss *link_hwss = get_link_hwss(stream->link, &pipe_ctx->link_res); + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + stream->link->cur_link_settings = link_settings; + + if (link_hwss->ext.enable_dp_link_output) + link_hwss->ext.enable_dp_link_output(stream->link, &pipe_ctx->link_res, + stream->signal, pipe_ctx->clock_source->id, + &link_settings); + + /* Enable DP_STREAM_ENC */ + dc->hwss.enable_stream(pipe_ctx); + + /* Set DPS PPS SDP (AKA "info frames") */ + if (pipe_ctx->stream->timing.flags.DSC) { + link_set_dsc_pps_packet(pipe_ctx, true, true); + } + + /* Allocate Payload */ + if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) { + // MST case + uint8_t i; + + proposed_table.stream_count = state->stream_count; + for (i = 0; i < state->stream_count; i++) { + avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + proposed_table.stream_allocations[i].slot_count = req_slot_count; + proposed_table.stream_allocations[i].vcp_id = i+1; + /* NOTE: This makes assumption that pipe_ctx index is same as stream index */ + proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc; + } + } else { + // SST case + avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, stream->link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + proposed_table.stream_count = 1; /// Always 1 stream for SST + proposed_table.stream_allocations[0].slot_count = req_slot_count; + proposed_table.stream_allocations[0].vcp_id = vc_id; + proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + } + + link_hwss->ext.update_stream_allocation_table(stream->link, + &pipe_ctx->link_res, + &proposed_table); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + + dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings); + dc->hwss.enable_audio_stream(pipe_ctx); +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h new file mode 100644 index 0000000000..3a80f55959 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_fpga.h @@ -0,0 +1,30 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_FPGA_H__ +#define __LINK_FPGA_H__ +#include "link.h" +void dp_fpga_hpo_enable_link_and_stream(struct dc_state *state, + struct pipe_ctx *pipe_ctx); +#endif /* __LINK_FPGA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c new file mode 100644 index 0000000000..1328a0ade3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.c @@ -0,0 +1,256 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "link_hwss_dio.h" +#include "core_types.h" +#include "link_enc_cfg.h" + +void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size) +{ + struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; + + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + throttled_vcp_size); +} + +void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; + + link_enc->funcs->connect_dig_be_to_fe(link_enc, + pipe_ctx->stream_res.stream_enc->id, true); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence(pipe_ctx->stream->link, + DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE); + if (stream_enc->funcs->map_stream_to_link) + stream_enc->funcs->map_stream_to_link(stream_enc, + stream_enc->stream_enc_inst, link_enc->transmitter - TRANSMITTER_UNIPHY_A); + if (stream_enc->funcs->enable_fifo) + stream_enc->funcs->enable_fifo(stream_enc); +} + +void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + struct stream_encoder *stream_enc = pipe_ctx->stream_res.stream_enc; + + if (stream_enc && stream_enc->funcs->disable_fifo) + stream_enc->funcs->disable_fifo(stream_enc); + + link_enc->funcs->connect_dig_be_to_fe( + link_enc, + pipe_ctx->stream_res.stream_enc->id, + false); + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence( + pipe_ctx->stream->link, + DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE); + +} + +void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx) +{ + struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + + if (!dc_is_virtual_signal(stream->signal)) + stream_encoder->funcs->setup_stereo_sync( + stream_encoder, + pipe_ctx->stream_res.tg->inst, + stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE); + + if (dc_is_dp_signal(stream->signal)) + stream_encoder->funcs->dp_set_stream_attribute( + stream_encoder, + &stream->timing, + stream->output_color_space, + stream->use_vsc_sdp_for_colorimetry, + link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); + else if (dc_is_hdmi_tmds_signal(stream->signal)) + stream_encoder->funcs->hdmi_set_stream_attribute( + stream_encoder, + &stream->timing, + stream->phy_pix_clk, + pipe_ctx->stream_res.audio != NULL); + else if (dc_is_dvi_signal(stream->signal)) + stream_encoder->funcs->dvi_set_stream_attribute( + stream_encoder, + &stream->timing, + (stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK) ? 
+ true : false); + else if (dc_is_lvds_signal(stream->signal)) + stream_encoder->funcs->lvds_set_stream_attribute( + stream_encoder, + &stream->timing); + + if (dc_is_dp_signal(stream->signal)) + link->dc->link_srv->dp_trace_source_sequence(link, + DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); +} + +void enable_dio_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + if (dc_is_dp_sst_signal(signal)) + link_enc->funcs->enable_dp_output( + link_enc, + link_settings, + clock_source); + else + link_enc->funcs->enable_dp_mst_output( + link_enc, + link_settings, + clock_source); + link->dc->link_srv->dp_trace_source_sequence(link, + DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY); +} + +void disable_dio_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + link_enc->funcs->disable_output(link_enc, signal); + link->dc->link_srv->dp_trace_source_sequence(link, + DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY); +} + +void set_dio_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params); + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +void set_dio_dp_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + link_enc->funcs->dp_set_lane_settings(link_enc, link_settings, lane_settings); +} + +void update_dio_stream_allocation_table(struct dc_link *link, + const struct link_resource *link_res, + const struct link_mst_stream_allocation_table *table) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + ASSERT(link_enc); + link_enc->funcs->update_mst_stream_allocation_table(link_enc, table); +} + +void setup_dio_audio_output(struct pipe_ctx *pipe_ctx, + struct audio_output *audio_output, uint32_t audio_inst) +{ + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup( + pipe_ctx->stream_res.stream_enc, + audio_inst, + &pipe_ctx->stream->audio_info); + else + pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup( + pipe_ctx->stream_res.stream_enc, + audio_inst, + &pipe_ctx->stream->audio_info, + &audio_output->crtc_info); +} + +void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx) +{ + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable( + pipe_ctx->stream_res.stream_enc); + + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( + pipe_ctx->stream_res.stream_enc, false); + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence( + pipe_ctx->stream->link, + DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM); +} + +void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx) +{ + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( + pipe_ctx->stream_res.stream_enc, true); + + if (pipe_ctx->stream_res.audio) { + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + 
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( + pipe_ctx->stream_res.stream_enc); + else + pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable( + pipe_ctx->stream_res.stream_enc); + } + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + pipe_ctx->stream->ctx->dc->link_srv->dp_trace_source_sequence( + pipe_ctx->stream->link, + DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM); +} + +static const struct link_hwss dio_link_hwss = { + .setup_stream_encoder = setup_dio_stream_encoder, + .reset_stream_encoder = reset_dio_stream_encoder, + .setup_stream_attribute = setup_dio_stream_attribute, + .disable_link_output = disable_dio_link_output, + .setup_audio_output = setup_dio_audio_output, + .enable_audio_packet = enable_dio_audio_packet, + .disable_audio_packet = disable_dio_audio_packet, + .ext = { + .set_throttled_vcp_size = set_dio_throttled_vcp_size, + .enable_dp_link_output = enable_dio_dp_link_output, + .set_dp_link_test_pattern = set_dio_dp_link_test_pattern, + .set_dp_lane_settings = set_dio_dp_lane_settings, + .update_stream_allocation_table = update_dio_stream_allocation_table, + }, +}; + +bool can_use_dio_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link->link_enc != NULL; +} + +const struct link_hwss *get_dio_link_hwss(void) +{ + return &dio_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h new file mode 100644 index 0000000000..f4633d3cf9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio.h @@ -0,0 +1,62 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_DIO_H__ +#define __LINK_HWSS_DIO_H__ + +#include "link_hwss.h" +#include "link.h" + +const struct link_hwss *get_dio_link_hwss(void); +bool can_use_dio_link_hwss(const struct dc_link *link, + const struct link_resource *link_res); +void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size); +void setup_dio_stream_encoder(struct pipe_ctx *pipe_ctx); +void reset_dio_stream_encoder(struct pipe_ctx *pipe_ctx); +void setup_dio_stream_attribute(struct pipe_ctx *pipe_ctx); +void enable_dio_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings); +void disable_dio_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); +void set_dio_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params); +void set_dio_dp_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); +void setup_dio_audio_output(struct pipe_ctx *pipe_ctx, + struct audio_output *audio_output, uint32_t audio_inst); +void enable_dio_audio_packet(struct pipe_ctx *pipe_ctx); +void disable_dio_audio_packet(struct pipe_ctx *pipe_ctx); +void update_dio_stream_allocation_table(struct dc_link *link, + const struct link_resource *link_res, + const struct link_mst_stream_allocation_table *table); + +#endif /* __LINK_HWSS_DIO_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c new file mode 100644 index 0000000000..b659baa231 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.c @@ -0,0 +1,200 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "link_hwss_dio.h" +#include "link_hwss_dio_fixed_vs_pe_retimer.h" +#include "link_enc_cfg.h" + +uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link) +{ + // TODO: Get USB-C cable orientation + if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR) + return 0xF2; + else + return 0x12; +} + +void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link) +{ + const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link); + const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + const uint8_t vendor_lttpr_exit_manual_automation_1[4] = {0x1, 0x50, dp_type, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_2[4] = {0x1, 0x50, 0x50, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_3[4] = {0x1, 0x51, 0x50, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_4[4] = {0x1, 0x10, 0x58, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_5[4] = {0x1, 0x10, 0x59, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_6[4] = {0x1, 0x30, 0x51, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_7[4] = {0x1, 0x30, 0x52, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_8[4] = {0x1, 0x30, 0x54, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_9[4] = {0x1, 0x30, 0x55, 0x0}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_0[0], sizeof(vendor_lttpr_exit_manual_automation_0)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_1[0], sizeof(vendor_lttpr_exit_manual_automation_1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_2[0], sizeof(vendor_lttpr_exit_manual_automation_2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_3[0], sizeof(vendor_lttpr_exit_manual_automation_3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_4[0], sizeof(vendor_lttpr_exit_manual_automation_4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_5[0], sizeof(vendor_lttpr_exit_manual_automation_5)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_6[0], sizeof(vendor_lttpr_exit_manual_automation_6)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_7[0], sizeof(vendor_lttpr_exit_manual_automation_7)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_8[0], sizeof(vendor_lttpr_exit_manual_automation_8)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_9[0], sizeof(vendor_lttpr_exit_manual_automation_9)); +} + +static bool set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override(struct dc_link *link, + const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params, + const struct link_hwss *link_hwss) +{ + struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 }; + const uint8_t pltpat_custom[10] = {0x1F, 0x7C, 0xF0, 0xC1, 0x07, 0x1F, 0x7C, 0xF0, 0xC1, 0x07}; + const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; + const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + + + if (tp_params == NULL) + return false; + + if (link->current_test_pattern >= 
DP_TEST_PATTERN_SQUARE_BEGIN && + link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) { + // Deprogram overrides from previous test pattern + dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); + } + + switch (tp_params->dp_phy_pattern) { + case DP_TEST_PATTERN_80BIT_CUSTOM: + if (tp_params->custom_pattern_size == 0 || memcmp(tp_params->custom_pattern, + pltpat_custom, tp_params->custom_pattern_size) != 0) + return false; + break; + case DP_TEST_PATTERN_D102: + break; + default: + if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM || + link->current_test_pattern == DP_TEST_PATTERN_D102) + // Deprogram overrides from previous test pattern + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_0[0], + sizeof(vendor_lttpr_exit_manual_automation_0)); + + return false; + } + + hw_tp_params.dp_phy_pattern = tp_params->dp_phy_pattern; + hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode; + + if (link_hwss->ext.set_dp_link_test_pattern) + link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params); + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0)); + + return true; +} + +static void set_dio_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + + if (!set_dio_fixed_vs_pe_retimer_dp_link_test_pattern_override( + link, link_res, tp_params, get_dio_link_hwss())) { + link_enc->funcs->dp_set_phy_pattern(link_enc, tp_params); + } + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link) +{ + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); +} + +static void enable_dio_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + if (link_settings->lane_count == LANE_COUNT_FOUR) + enable_dio_fixed_vs_pe_retimer_program_4lane_output(link); + + enable_dio_dp_link_output(link, link_res, signal, clock_source, link_settings); +} + +static const struct link_hwss dio_fixed_vs_pe_retimer_link_hwss = { + .setup_stream_encoder = setup_dio_stream_encoder, + 
.reset_stream_encoder = reset_dio_stream_encoder, + .setup_stream_attribute = setup_dio_stream_attribute, + .disable_link_output = disable_dio_link_output, + .setup_audio_output = setup_dio_audio_output, + .enable_audio_packet = enable_dio_audio_packet, + .disable_audio_packet = disable_dio_audio_packet, + .ext = { + .set_throttled_vcp_size = set_dio_throttled_vcp_size, + .enable_dp_link_output = enable_dio_fixed_vs_pe_retimer_dp_link_output, + .set_dp_link_test_pattern = set_dio_fixed_vs_pe_retimer_dp_link_test_pattern, + .set_dp_lane_settings = set_dio_dp_lane_settings, + .update_stream_allocation_table = update_dio_stream_allocation_table, + }, +}; + +bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link) +{ + if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) + return false; + + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) + return false; + + return true; +} + +const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void) +{ + return &dio_fixed_vs_pe_retimer_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h new file mode 100644 index 0000000000..9ac08a3325 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dio_fixed_vs_pe_retimer.h @@ -0,0 +1,37 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ +#define __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ + +#include "link.h" + +uint32_t dp_dio_fixed_vs_pe_retimer_get_lttpr_write_address(struct dc_link *link); +uint8_t dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(struct dc_link *link); +void dp_dio_fixed_vs_pe_retimer_exit_manual_automation(struct dc_link *link); +void enable_dio_fixed_vs_pe_retimer_program_4lane_output(struct dc_link *link); +bool requires_fixed_vs_pe_retimer_dio_link_hwss(const struct dc_link *link); +const struct link_hwss *get_dio_fixed_vs_pe_retimer_link_hwss(void); + +#endif /* __LINK_HWSS_DIO_FIXED_VS_PE_RETIMER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c new file mode 100644 index 0000000000..861f3cd5b3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.c @@ -0,0 +1,82 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "link_hwss_dpia.h" +#include "core_types.h" +#include "link_hwss_dio.h" +#include "link_enc_cfg.h" + +#define DC_LOGGER_INIT(logger) + +static void update_dpia_stream_allocation_table(struct dc_link *link, + const struct link_resource *link_res, + const struct link_mst_stream_allocation_table *table) +{ + struct link_encoder *link_enc = link_enc_cfg_get_link_enc(link); + static enum dc_status status; + uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF; + int i; + DC_LOGGER_INIT(link->ctx->logger); + + for (i = 0; i < table->stream_count; i++) + mst_alloc_slots += table->stream_allocations[i].slot_count; + + status = dc_process_dmub_set_mst_slots(link->dc, link->link_index, + mst_alloc_slots, &prev_mst_slots_in_use); + ASSERT(status == DC_OK); + DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n", + status, mst_alloc_slots, prev_mst_slots_in_use); + + ASSERT(link_enc); + link_enc->funcs->update_mst_stream_allocation_table(link_enc, table); +} + +static const struct link_hwss dpia_link_hwss = { + .setup_stream_encoder = setup_dio_stream_encoder, + .reset_stream_encoder = reset_dio_stream_encoder, + .setup_stream_attribute = setup_dio_stream_attribute, + .disable_link_output = disable_dio_link_output, + .setup_audio_output = setup_dio_audio_output, + .enable_audio_packet = enable_dio_audio_packet, + .disable_audio_packet = disable_dio_audio_packet, + .ext = { + .set_throttled_vcp_size = set_dio_throttled_vcp_size, + .enable_dp_link_output = enable_dio_dp_link_output, + .set_dp_link_test_pattern = set_dio_dp_link_test_pattern, + .set_dp_lane_settings = set_dio_dp_lane_settings, + .update_stream_allocation_table = update_dpia_stream_allocation_table, + }, +}; + +bool can_use_dpia_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign; +} + +const struct link_hwss *get_dpia_link_hwss(void) +{ + return &dpia_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h new file mode 100644 index 0000000000..ad16ec5d9b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_dpia.h @@ -0,0 +1,34 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_DPIA_H__ +#define __LINK_HWSS_DPIA_H__ + +#include "link_hwss.h" + +const struct link_hwss *get_dpia_link_hwss(void); +bool can_use_dpia_link_hwss(const struct dc_link *link, + const struct link_resource *link_res); + +#endif /* __LINK_HWSS_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c new file mode 100644 index 0000000000..e125740435 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.c @@ -0,0 +1,216 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "link_hwss_hpo_dp.h" +#include "dm_helpers.h" +#include "core_types.h" +#include "dccg.h" +#include "clk_mgr.h" + +void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size) +{ + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = + pipe_ctx->stream_res.hpo_dp_stream_enc; + struct hpo_dp_link_encoder *hpo_dp_link_encoder = + pipe_ctx->link_res.hpo_dp_link_enc; + + hpo_dp_link_encoder->funcs->set_throttled_vcp_size(hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + throttled_vcp_size); +} + +void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, + const struct dc_link_settings *link_settings, + struct fixed31_32 throttled_vcp_size) +{ + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = + pipe_ctx->stream_res.hpo_dp_stream_enc; + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + struct fixed31_32 h_blank_in_ms, time_slot_in_ms, mtp_cnt_per_h_blank; + uint32_t link_bw_in_kbps = + hpo_dp_stream_encoder->ctx->dc->link_srv->dp_link_bandwidth_kbps( + pipe_ctx->stream->link, link_settings); + uint16_t hblank_min_symbol_width = 0; + + if (link_bw_in_kbps > 0) { + h_blank_in_ms = dc_fixpt_div(dc_fixpt_from_int( + timing->h_total - timing->h_addressable), + dc_fixpt_from_fraction(timing->pix_clk_100hz, 10)); + time_slot_in_ms = dc_fixpt_from_fraction(32 * 4, link_bw_in_kbps); + mtp_cnt_per_h_blank = dc_fixpt_div(h_blank_in_ms, + dc_fixpt_mul_int(time_slot_in_ms, 64)); + hblank_min_symbol_width = dc_fixpt_floor( + dc_fixpt_mul(mtp_cnt_per_h_blank, throttled_vcp_size)); + } + + hpo_dp_stream_encoder->funcs->set_hblank_min_symbol_width(hpo_dp_stream_encoder, + hblank_min_symbol_width); +} + +void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) +{ + struct hpo_dp_stream_encoder 
*stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc; + + stream_enc->funcs->enable_stream(stream_enc); + stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst); +} + +void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) +{ + struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + + stream_enc->funcs->disable(stream_enc); +} + +void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx) +{ + struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + + stream_enc->funcs->set_stream_attribute( + stream_enc, + &stream->timing, + stream->output_color_space, + stream->use_vsc_sdp_for_colorimetry, + stream->timing.flags.DSC, + false); + link->dc->link_srv->dp_trace_source_sequence(link, + DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); +} + +void enable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + true); + link_res->hpo_dp_link_enc->funcs->enable_link_phy( + link_res->hpo_dp_link_enc, + link_settings, + link->link_enc->transmitter, + link->link_enc->hpd_source); +} + +void disable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + link_res->hpo_dp_link_enc->funcs->link_disable(link_res->hpo_dp_link_enc); + link_res->hpo_dp_link_enc->funcs->disable_link_phy( + link_res->hpo_dp_link_enc, signal); + if (link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating) + link->dc->res_pool->dccg->funcs->set_symclk32_le_root_clock_gating( + link->dc->res_pool->dccg, + link_res->hpo_dp_link_enc->inst, + false); +} + +static void set_hpo_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( + link_res->hpo_dp_link_enc, tp_params); + link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +static void set_hpo_dp_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + link_res->hpo_dp_link_enc->funcs->set_ffe( + link_res->hpo_dp_link_enc, + link_settings, + lane_settings[0].FFE_PRESET.raw); +} + +void update_hpo_dp_stream_allocation_table(struct dc_link *link, + const struct link_resource *link_res, + const struct link_mst_stream_allocation_table *table) +{ + link_res->hpo_dp_link_enc->funcs->update_stream_allocation_table( + link_res->hpo_dp_link_enc, + table); +} + +void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, + struct audio_output *audio_output, uint32_t audio_inst) +{ + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup( + pipe_ctx->stream_res.hpo_dp_stream_enc, + audio_inst, + &pipe_ctx->stream->audio_info); +} + +void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) +{ + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable( + pipe_ctx->stream_res.hpo_dp_stream_enc); +} + 
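[Editor's aside, not part of the patch] The fixed-point computation in set_hpo_dp_hblank_min_symbol_width() above is easier to follow when written out in plain floating point. The sketch below only mirrors that formula; the standalone helper name and the timing/bandwidth numbers in main() are illustrative assumptions, not values taken from the driver.

#include <stdint.h>
#include <stdio.h>

static uint16_t hblank_min_symbol_width(uint32_t h_total,
					uint32_t h_addressable,
					uint32_t pix_clk_100hz,
					uint32_t link_bw_in_kbps,
					double throttled_vcp_size)
{
	/* horizontal blank duration: blank pixels over the pixel clock in kHz */
	double h_blank = (h_total - h_addressable) / (pix_clk_100hz / 10.0);
	/* time-slot duration, derived as (32 * 4) / total link bandwidth, as in the driver */
	double time_slot = (32.0 * 4.0) / link_bw_in_kbps;
	/* how many MTPs fit in the horizontal blank; one MTP spans 64 time slots */
	double mtp_cnt_per_h_blank = h_blank / (time_slot * 64.0);

	/* scale by the stream's share of the link (throttled VCP size);
	 * truncation matches dc_fixpt_floor() for non-negative values */
	return (uint16_t)(mtp_cnt_per_h_blank * throttled_vcp_size);
}

int main(void)
{
	/* hypothetical example: 3840x2160 timing with h_total 4400 at 594 MHz
	 * (pix_clk_100hz = 5940000) on a 4-lane link of 40,000,000 kbps,
	 * with a throttled VCP size of 4 time slots */
	printf("%u\n", hblank_min_symbol_width(4400, 3840, 5940000, 40000000, 4.0));
	return 0;
}

The real code stays in dc_fixpt_* fixed-point arithmetic rather than floating point, which is the usual choice in this driver since kernel display code avoids relying on the FPU in these paths.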
+void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx) +{ + if (pipe_ctx->stream_res.audio) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable( + pipe_ctx->stream_res.hpo_dp_stream_enc); +} + +static const struct link_hwss hpo_dp_link_hwss = { + .setup_stream_encoder = setup_hpo_dp_stream_encoder, + .reset_stream_encoder = reset_hpo_dp_stream_encoder, + .setup_stream_attribute = setup_hpo_dp_stream_attribute, + .disable_link_output = disable_hpo_dp_link_output, + .setup_audio_output = setup_hpo_dp_audio_output, + .enable_audio_packet = enable_hpo_dp_audio_packet, + .disable_audio_packet = disable_hpo_dp_audio_packet, + .ext = { + .set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size, + .set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width, + .enable_dp_link_output = enable_hpo_dp_link_output, + .set_dp_link_test_pattern = set_hpo_dp_link_test_pattern, + .set_dp_lane_settings = set_hpo_dp_lane_settings, + .update_stream_allocation_table = update_hpo_dp_stream_allocation_table, + }, +}; + +bool can_use_hpo_dp_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link_res->hpo_dp_link_enc != NULL; +} + +const struct link_hwss *get_hpo_dp_link_hwss(void) +{ + return &hpo_dp_link_hwss; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h new file mode 100644 index 0000000000..1d3ed8ca83 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_dp.h @@ -0,0 +1,62 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_HPO_DP_H__ +#define __LINK_HWSS_HPO_DP_H__ + +#include "link_hwss.h" +#include "link.h" + +void set_hpo_dp_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size); +void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, + const struct dc_link_settings *link_settings, + struct fixed31_32 throttled_vcp_size); +void set_hpo_dp_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, + const struct dc_link_settings *link_settings, + struct fixed31_32 throttled_vcp_size); +void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx); +void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx); +void setup_hpo_dp_stream_attribute(struct pipe_ctx *pipe_ctx); +void enable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings); +void disable_hpo_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); +void update_hpo_dp_stream_allocation_table(struct dc_link *link, + const struct link_resource *link_res, + const struct link_mst_stream_allocation_table *table); +void setup_hpo_dp_audio_output(struct pipe_ctx *pipe_ctx, + struct audio_output *audio_output, uint32_t audio_inst); +void enable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx); +void disable_hpo_dp_audio_packet(struct pipe_ctx *pipe_ctx); +const struct link_hwss *get_hpo_dp_link_hwss(void); +bool can_use_hpo_dp_link_hwss(const struct dc_link *link, + const struct link_resource *link_res); + + +#endif /* __LINK_HWSS_HPO_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c new file mode 100644 index 0000000000..b621b97711 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -0,0 +1,229 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "link_hwss_hpo_dp.h" +#include "link_hwss_hpo_fixed_vs_pe_retimer_dp.h" +#include "link_hwss_dio_fixed_vs_pe_retimer.h" + +static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link, + const struct dc_lane_settings *hw_lane_settings) +{ + const uint8_t vendor_ffe_preset_table[16] = { + 0x01, 0x41, 0x61, 0x81, + 0xB1, 0x05, 0x35, 0x65, + 0x85, 0xA5, 0x09, 0x39, + 0x59, 0x89, 0x0F, 0x24}; + + const uint8_t ffe_mask[4] = { + (hw_lane_settings[0].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[0].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF), + (hw_lane_settings[1].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[1].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF), + (hw_lane_settings[2].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[2].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF), + (hw_lane_settings[3].FFE_PRESET.settings.no_deemphasis != 0 ? 0x0F : 0xFF) + & (hw_lane_settings[3].FFE_PRESET.settings.no_preshoot != 0 ? 0xF1 : 0xFF)}; + + const uint8_t ffe_cfg[4] = { + vendor_ffe_preset_table[hw_lane_settings[0].FFE_PRESET.settings.level] & ffe_mask[0], + vendor_ffe_preset_table[hw_lane_settings[1].FFE_PRESET.settings.level] & ffe_mask[1], + vendor_ffe_preset_table[hw_lane_settings[2].FFE_PRESET.settings.level] & ffe_mask[2], + vendor_ffe_preset_table[hw_lane_settings[3].FFE_PRESET.settings.level] & ffe_mask[3]}; + + const uint8_t dp_type = dp_dio_fixed_vs_pe_retimer_lane_cfg_to_hw_cfg(link); + + const uint8_t vendor_lttpr_write_data_ffe1[4] = {0x01, 0x50, dp_type, 0x0F}; + const uint8_t vendor_lttpr_write_data_ffe2[4] = {0x01, 0x55, dp_type, ffe_cfg[0]}; + const uint8_t vendor_lttpr_write_data_ffe3[4] = {0x01, 0x56, dp_type, ffe_cfg[1]}; + const uint8_t vendor_lttpr_write_data_ffe4[4] = {0x01, 0x57, dp_type, ffe_cfg[2]}; + const uint8_t vendor_lttpr_write_data_ffe5[4] = {0x01, 0x58, dp_type, ffe_cfg[3]}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe1[0], sizeof(vendor_lttpr_write_data_ffe1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe2[0], sizeof(vendor_lttpr_write_data_ffe2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe3[0], sizeof(vendor_lttpr_write_data_ffe3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe4[0], sizeof(vendor_lttpr_write_data_ffe4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_ffe5[0], sizeof(vendor_lttpr_write_data_ffe5)); +} + +static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; + const uint8_t vendor_lttpr_write_data_pg1[4] = {0x1, 0x50, 0x50, 0x0}; + const uint8_t vendor_lttpr_write_data_pg2[4] = {0x1, 0x51, 0x50, 0x0}; + const uint8_t vendor_lttpr_write_data_pg3[4] = {0x1, 0x10, 0x58, 0x21}; + const uint8_t vendor_lttpr_write_data_pg4[4] = {0x1, 0x10, 0x59, 0x21}; + const uint8_t vendor_lttpr_write_data_pg5[4] = {0x1, 0x1C, 0x58, 0x4F}; + const uint8_t vendor_lttpr_write_data_pg6[4] = {0x1, 0x1C, 0x59, 0x4F}; + const uint8_t vendor_lttpr_write_data_pg7[4] = {0x1, 0x30, 0x51, 0x20}; + const uint8_t vendor_lttpr_write_data_pg8[4] = {0x1, 0x30, 0x52, 0x20}; + const uint8_t vendor_lttpr_write_data_pg9[4] = {0x1, 0x30, 
0x54, 0x20}; + const uint8_t vendor_lttpr_write_data_pg10[4] = {0x1, 0x30, 0x55, 0x20}; + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg0[0], sizeof(vendor_lttpr_write_data_pg0)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg1[0], sizeof(vendor_lttpr_write_data_pg1)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg2[0], sizeof(vendor_lttpr_write_data_pg2)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg3[0], sizeof(vendor_lttpr_write_data_pg3)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg4[0], sizeof(vendor_lttpr_write_data_pg4)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg5[0], sizeof(vendor_lttpr_write_data_pg5)); + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg6[0], sizeof(vendor_lttpr_write_data_pg6)); + + if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR) + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg7[0], sizeof(vendor_lttpr_write_data_pg7)); + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg8[0], sizeof(vendor_lttpr_write_data_pg8)); + + if (link->cur_link_settings.lane_count == LANE_COUNT_FOUR) + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg9[0], sizeof(vendor_lttpr_write_data_pg9)); + + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pg10[0], sizeof(vendor_lttpr_write_data_pg10)); +} + +static bool dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern(struct dc_link *link, + const struct link_resource *link_res, struct encoder_set_dp_phy_pattern_param *tp_params, + const struct link_hwss *link_hwss) +{ + struct encoder_set_dp_phy_pattern_param hw_tp_params = { 0 }; + const uint8_t vendor_lttpr_exit_manual_automation_0[4] = {0x1, 0x11, 0x0, 0x06}; + + if (tp_params == NULL) + return false; + + if (tp_params->dp_phy_pattern < DP_TEST_PATTERN_SQUARE_BEGIN || + tp_params->dp_phy_pattern > DP_TEST_PATTERN_SQUARE_END) { + // Deprogram overrides from previously set square wave override + if (link->current_test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM || + link->current_test_pattern == DP_TEST_PATTERN_D102) + link->dc->link_srv->configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_exit_manual_automation_0[0], + sizeof(vendor_lttpr_exit_manual_automation_0)); + else + dp_dio_fixed_vs_pe_retimer_exit_manual_automation(link); + + return false; + } + + hw_tp_params.dp_phy_pattern = DP_TEST_PATTERN_PRBS31; + hw_tp_params.dp_panel_mode = tp_params->dp_panel_mode; + + if (link_hwss->ext.set_dp_link_test_pattern) + link_hwss->ext.set_dp_link_test_pattern(link, link_res, &hw_tp_params); + + dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(link, tp_params); + + dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &link->cur_lane_setting[0]); + + return true; +} + +static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *link, + const struct link_resource *link_res, + struct encoder_set_dp_phy_pattern_param *tp_params) +{ + if (!dp_hpo_fixed_vs_pe_retimer_set_override_test_pattern( + link, link_res, tp_params, get_hpo_dp_link_hwss())) { + link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( + link_res->hpo_dp_link_enc, tp_params); + } + 
link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); +} + +static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + const struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + link_res->hpo_dp_link_enc->funcs->set_ffe( + link_res->hpo_dp_link_enc, + link_settings, + lane_settings[0].FFE_PRESET.raw); + + // FFE is programmed when retimer is programmed for SQ128, but explicit + // programming needed here as well in case FFE-only update is requested + if (link->current_test_pattern >= DP_TEST_PATTERN_SQUARE_BEGIN && + link->current_test_pattern <= DP_TEST_PATTERN_SQUARE_END) + dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(link, &lane_settings[0]); +} + +static void enable_hpo_fixed_vs_pe_retimer_dp_link_output(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + if (link_settings->lane_count == LANE_COUNT_FOUR) + enable_dio_fixed_vs_pe_retimer_program_4lane_output(link); + + enable_hpo_dp_link_output(link, link_res, signal, clock_source, link_settings); +} + +static const struct link_hwss hpo_fixed_vs_pe_retimer_dp_link_hwss = { + .setup_stream_encoder = setup_hpo_dp_stream_encoder, + .reset_stream_encoder = reset_hpo_dp_stream_encoder, + .setup_stream_attribute = setup_hpo_dp_stream_attribute, + .disable_link_output = disable_hpo_dp_link_output, + .setup_audio_output = setup_hpo_dp_audio_output, + .enable_audio_packet = enable_hpo_dp_audio_packet, + .disable_audio_packet = disable_hpo_dp_audio_packet, + .ext = { + .set_throttled_vcp_size = set_hpo_dp_throttled_vcp_size, + .set_hblank_min_symbol_width = set_hpo_dp_hblank_min_symbol_width, + .enable_dp_link_output = enable_hpo_fixed_vs_pe_retimer_dp_link_output, + .set_dp_link_test_pattern = set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern, + .set_dp_lane_settings = set_hpo_fixed_vs_pe_retimer_dp_lane_settings, + .update_stream_allocation_table = update_hpo_dp_stream_allocation_table, + }, +}; + +bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link) +{ + if (!(link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN)) + return false; + + if (!link->dpcd_caps.lttpr_caps.main_link_channel_coding.bits.DP_128b_132b_SUPPORTED) + return false; + + return true; +} + +const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void) +{ + return &hpo_fixed_vs_pe_retimer_dp_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h new file mode 100644 index 0000000000..82301187bc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.h @@ -0,0 +1,33 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ +#define __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ + +#include "link.h" + +bool requires_fixed_vs_pe_retimer_hpo_link_hwss(const struct dc_link *link); +const struct link_hwss *get_hpo_fixed_vs_pe_retimer_dp_link_hwss(void); + +#endif /* __LINK_HWSS_HPO_FIXED_VS_PE_RETIMER_DP_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c new file mode 100644 index 0000000000..c7a9e286a5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -0,0 +1,1444 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file manages link detection states and receiver states by using various + * link protocols. It also provides helper functions to interpret certain + * capabilities or status based on the states it manages or retrieve them + * directly from connected receivers. 
+ */ + +#include "link_dpms.h" +#include "link_detection.h" +#include "link_hwss.h" +#include "protocols/link_edp_panel_control.h" +#include "protocols/link_ddc.h" +#include "protocols/link_hpd.h" +#include "protocols/link_dpcd.h" +#include "protocols/link_dp_capability.h" +#include "protocols/link_dp_dpia.h" +#include "protocols/link_dp_phy.h" +#include "protocols/link_dp_training.h" +#include "accessories/link_dp_trace.h" + +#include "link_enc_cfg.h" +#include "dm_helpers.h" +#include "clk_mgr.h" + +#define DC_LOGGER_INIT(logger) + +#define LINK_INFO(...) \ + DC_LOG_HW_HOTPLUG( \ + __VA_ARGS__) +/* + * Some receivers fail to train on first try and are good + * on subsequent tries. 2 retries should be plenty. If we + * don't have a successful training then we don't expect to + * ever get one. + */ +#define LINK_TRAINING_MAX_VERIFY_RETRY 2 + +static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u"; + +static const uint8_t dp_hdmi_dongle_signature_str[] = "DP-HDMI ADAPTOR"; + +static enum ddc_transaction_type get_ddc_transaction_type(enum signal_type sink_signal) +{ + enum ddc_transaction_type transaction_type = DDC_TRANSACTION_TYPE_NONE; + + switch (sink_signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + case SIGNAL_TYPE_LVDS: + case SIGNAL_TYPE_RGB: + transaction_type = DDC_TRANSACTION_TYPE_I2C; + break; + + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_EDP: + transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + break; + + case SIGNAL_TYPE_DISPLAY_PORT_MST: + /* MST does not use I2COverAux, but there is the + * SPECIAL use case for "immediate dwnstrm device + * access" (EPR#370830). + */ + transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + break; + + default: + break; + } + + return transaction_type; +} + +static enum signal_type get_basic_signal_type(struct graphics_object_id encoder, + struct graphics_object_id downstream) +{ + if (downstream.type == OBJECT_TYPE_CONNECTOR) { + switch (downstream.id) { + case CONNECTOR_ID_SINGLE_LINK_DVII: + switch (encoder.id) { + case ENCODER_ID_INTERNAL_DAC1: + case ENCODER_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_ID_INTERNAL_DAC2: + case ENCODER_ID_INTERNAL_KLDSCP_DAC2: + return SIGNAL_TYPE_RGB; + default: + return SIGNAL_TYPE_DVI_SINGLE_LINK; + } + break; + case CONNECTOR_ID_DUAL_LINK_DVII: + { + switch (encoder.id) { + case ENCODER_ID_INTERNAL_DAC1: + case ENCODER_ID_INTERNAL_KLDSCP_DAC1: + case ENCODER_ID_INTERNAL_DAC2: + case ENCODER_ID_INTERNAL_KLDSCP_DAC2: + return SIGNAL_TYPE_RGB; + default: + return SIGNAL_TYPE_DVI_DUAL_LINK; + } + } + break; + case CONNECTOR_ID_SINGLE_LINK_DVID: + return SIGNAL_TYPE_DVI_SINGLE_LINK; + case CONNECTOR_ID_DUAL_LINK_DVID: + return SIGNAL_TYPE_DVI_DUAL_LINK; + case CONNECTOR_ID_VGA: + return SIGNAL_TYPE_RGB; + case CONNECTOR_ID_HDMI_TYPE_A: + return SIGNAL_TYPE_HDMI_TYPE_A; + case CONNECTOR_ID_LVDS: + return SIGNAL_TYPE_LVDS; + case CONNECTOR_ID_DISPLAY_PORT: + case CONNECTOR_ID_USBC: + return SIGNAL_TYPE_DISPLAY_PORT; + case CONNECTOR_ID_EDP: + return SIGNAL_TYPE_EDP; + default: + return SIGNAL_TYPE_NONE; + } + } else if (downstream.type == OBJECT_TYPE_ENCODER) { + switch (downstream.id) { + case ENCODER_ID_EXTERNAL_NUTMEG: + case ENCODER_ID_EXTERNAL_TRAVIS: + return SIGNAL_TYPE_DISPLAY_PORT; + default: + return SIGNAL_TYPE_NONE; + } + } + + return SIGNAL_TYPE_NONE; +} + +/* + * @brief + * Detect output sink type + */ +static enum signal_type link_detect_sink_signal_type(struct dc_link *link, + enum dc_detect_reason reason) +{ + enum 
signal_type result; + struct graphics_object_id enc_id; + + if (link->is_dig_mapping_flexible) + enc_id = (struct graphics_object_id){.id = ENCODER_ID_UNKNOWN}; + else + enc_id = link->link_enc->id; + result = get_basic_signal_type(enc_id, link->link_id); + + /* Use basic signal type for link without physical connector. */ + if (link->ep_type != DISPLAY_ENDPOINT_PHY) + return result; + + /* Internal digital encoder will detect only dongles + * that require digital signal + */ + + /* Detection mechanism is different + * for different native connectors. + * LVDS connector supports only LVDS signal; + * PCIE is a bus slot, the actual connector needs to be detected first; + * eDP connector supports only eDP signal; + * HDMI should check straps for audio + */ + + /* PCIE detects the actual connector on add-on board */ + if (link->link_id.id == CONNECTOR_ID_PCIE) { + /* ZAZTODO implement PCIE add-on card detection */ + } + + switch (link->link_id.id) { + case CONNECTOR_ID_HDMI_TYPE_A: { + /* check audio support: + * if native HDMI is not supported, switch to DVI + */ + struct audio_support *aud_support = + &link->dc->res_pool->audio_support; + + if (!aud_support->hdmi_audio_native) + if (link->link_id.id == CONNECTOR_ID_HDMI_TYPE_A) + result = SIGNAL_TYPE_DVI_SINGLE_LINK; + } + break; + case CONNECTOR_ID_DISPLAY_PORT: + case CONNECTOR_ID_USBC: { + /* DP HPD short pulse. Passive DP dongle will not + * have short pulse + */ + if (reason != DETECT_REASON_HPDRX) { + /* Check whether DP signal detected: if not - + * we assume signal is DVI; it could be corrected + * to HDMI after dongle detection + */ + if (!dm_helpers_is_dp_sink_present(link)) + result = SIGNAL_TYPE_DVI_SINGLE_LINK; + } + } + break; + default: + break; + } + + return result; +} + +static enum signal_type decide_signal_from_strap_and_dongle_type(enum display_dongle_type dongle_type, + struct audio_support *audio_support) +{ + enum signal_type signal = SIGNAL_TYPE_NONE; + + switch (dongle_type) { + case DISPLAY_DONGLE_DP_HDMI_DONGLE: + if (audio_support->hdmi_audio_on_dongle) + signal = SIGNAL_TYPE_HDMI_TYPE_A; + else + signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + case DISPLAY_DONGLE_DP_DVI_DONGLE: + signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: + if (audio_support->hdmi_audio_native) + signal = SIGNAL_TYPE_HDMI_TYPE_A; + else + signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + default: + signal = SIGNAL_TYPE_NONE; + break; + } + + return signal; +} + +static void read_scdc_caps(struct ddc_service *ddc_service, + struct dc_sink *sink) +{ + uint8_t slave_address = HDMI_SCDC_ADDRESS; + uint8_t offset = HDMI_SCDC_MANUFACTURER_OUI; + + link_query_ddc_data(ddc_service, slave_address, &offset, + sizeof(offset), sink->scdc_caps.manufacturer_OUI.byte, + sizeof(sink->scdc_caps.manufacturer_OUI.byte)); + + offset = HDMI_SCDC_DEVICE_ID; + + link_query_ddc_data(ddc_service, slave_address, &offset, + sizeof(offset), &(sink->scdc_caps.device_id.byte), + sizeof(sink->scdc_caps.device_id.byte)); +} + +static bool i2c_read( + struct ddc_service *ddc, + uint32_t address, + uint8_t *buffer, + uint32_t len) +{ + uint8_t offs_data = 0; + struct i2c_payload payloads[2] = { + { + .write = true, + .address = address, + .length = 1, + .data = &offs_data }, + { + .write = false, + .address = address, + .length = len, + .data = buffer } }; + + struct i2c_command command = { + .payloads = payloads, + .number_of_payloads = 2, + .engine = DDC_I2C_COMMAND_ENGINE, + .speed = ddc->ctx->dc->caps.i2c_speed_in_khz }; 
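The i2c_read() helper being defined here builds the usual access pattern for DP dual-mode devices: a one-byte write payload that selects the register offset, followed by a read payload that fetches the data, both submitted as a single command. A minimal stand-alone sketch of that shape follows; the demo_i2c_payload type and build_offset_read() are illustrative stand-ins, not the driver's real i2c_payload/i2c_command API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_i2c_payload {
	bool write;        /* true = write to the device, false = read back */
	uint8_t address;   /* 7-bit slave address */
	uint32_t length;   /* number of bytes in data */
	uint8_t *data;     /* bytes to send, or buffer to fill */
};

/* Build the classic "write register offset, then read N bytes" pair. */
static void build_offset_read(struct demo_i2c_payload pair[2], uint8_t slave,
			      uint8_t *offset, uint8_t *buf, uint32_t len)
{
	pair[0] = (struct demo_i2c_payload){ .write = true,  .address = slave, .length = 1,   .data = offset };
	pair[1] = (struct demo_i2c_payload){ .write = false, .address = slave, .length = len, .data = buf };
}

int main(void)
{
	uint8_t offset = 0x10;   /* arbitrary dongle register offset */
	uint8_t buf[4] = {0};
	struct demo_i2c_payload pair[2];

	build_offset_read(pair, 0x40, &offset, buf, (uint32_t)sizeof(buf));
	printf("payload 0: write=%d len=%u, payload 1: write=%d len=%u\n",
	       (int)pair[0].write, (unsigned)pair[0].length,
	       (int)pair[1].write, (unsigned)pair[1].length);
	return 0;
}

read_scdc_caps() above uses exactly this shape twice, once for the manufacturer OUI and once for the device id.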
+ + return dm_helpers_submit_i2c( + ddc->ctx, + ddc->link, + &command); +} + +enum { + DP_SINK_CAP_SIZE = + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV + 1 +}; + +static void query_dp_dual_mode_adaptor( + struct ddc_service *ddc, + struct display_sink_capability *sink_cap) +{ + uint8_t i; + bool is_valid_hdmi_signature; + enum display_dongle_type *dongle = &sink_cap->dongle_type; + uint8_t type2_dongle_buf[DP_ADAPTOR_TYPE2_SIZE]; + bool is_type2_dongle = false; + int retry_count = 2; + struct dp_hdmi_dongle_signature_data *dongle_signature; + + /* Assume we have no valid DP passive dongle connected */ + *dongle = DISPLAY_DONGLE_NONE; + sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_HDMI_SAFE_MAX_TMDS_CLK; + + /* Read DP-HDMI dongle I2c (no response interpreted as DP-DVI dongle)*/ + if (!i2c_read( + ddc, + DP_HDMI_DONGLE_ADDRESS, + type2_dongle_buf, + sizeof(type2_dongle_buf))) { + /* Passive HDMI dongles can sometimes fail here without retrying*/ + while (retry_count > 0) { + if (i2c_read(ddc, + DP_HDMI_DONGLE_ADDRESS, + type2_dongle_buf, + sizeof(type2_dongle_buf))) + break; + retry_count--; + } + if (retry_count == 0) { + *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; + sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf), + "DP-DVI passive dongle %dMhz: ", + DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); + return; + } + } + + /* Check if Type 2 dongle.*/ + if (type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_ID] == DP_ADAPTOR_TYPE2_ID) + is_type2_dongle = true; + + dongle_signature = + (struct dp_hdmi_dongle_signature_data *)type2_dongle_buf; + + is_valid_hdmi_signature = true; + + /* Check EOT */ + if (dongle_signature->eot != DP_HDMI_DONGLE_SIGNATURE_EOT) { + is_valid_hdmi_signature = false; + } + + /* Check signature */ + for (i = 0; i < sizeof(dongle_signature->id); ++i) { + /* If its not the right signature, + * skip mismatch in subversion byte.*/ + if (dongle_signature->id[i] != + dp_hdmi_dongle_signature_str[i] && i != 3) { + + if (is_type2_dongle) { + is_valid_hdmi_signature = false; + break; + } + + } + } + + if (is_type2_dongle) { + uint32_t max_tmds_clk = + type2_dongle_buf[DP_ADAPTOR_TYPE2_REG_MAX_TMDS_CLK]; + + max_tmds_clk = max_tmds_clk * 2 + max_tmds_clk / 2; + + if (0 == max_tmds_clk || + max_tmds_clk < DP_ADAPTOR_TYPE2_MIN_TMDS_CLK || + max_tmds_clk > DP_ADAPTOR_TYPE2_MAX_TMDS_CLK) { + *dongle = DISPLAY_DONGLE_DP_DVI_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "DP-DVI passive dongle %dMhz: ", + DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000); + } else { + if (is_valid_hdmi_signature == true) { + *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 2 DP-HDMI passive dongle %dMhz: ", + max_tmds_clk); + } else { + *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 2 DP-HDMI passive dongle (no signature) %dMhz: ", + max_tmds_clk); + + } + + /* Multiply by 1000 to convert to kHz. 
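For a type 2 adaptor, the max-TMDS-clock byte read from the dongle is scaled by 2.5 above (max_tmds_clk * 2 + max_tmds_clk / 2), range-checked, and then converted to kHz just below, which suggests the register counts units of 2.5 MHz. A stand-alone sketch of that arithmetic with one worked value; the DEMO_* limits are placeholders rather than the driver's DP_ADAPTOR_TYPE2_MIN/MAX_TMDS_CLK constants.

#include <stdint.h>
#include <stdio.h>

#define DEMO_TYPE2_MIN_TMDS_MHZ 25
#define DEMO_TYPE2_MAX_TMDS_MHZ 600

/* Returns the maximum HDMI pixel clock in kHz, or 0 when the raw register
 * value is out of range (the driver then falls back to the DVI limit). */
static uint32_t type2_max_pixel_clock_khz(uint8_t raw_reg)
{
	uint32_t mhz = (uint32_t)raw_reg * 2 + raw_reg / 2;	/* x2.5 scaling */

	if (mhz == 0 || mhz < DEMO_TYPE2_MIN_TMDS_MHZ || mhz > DEMO_TYPE2_MAX_TMDS_MHZ)
		return 0;
	return mhz * 1000;	/* MHz -> kHz */
}

int main(void)
{
	/* worked value: 0x78 = 120 raw units -> 120 * 2.5 = 300 MHz -> 300000 kHz */
	printf("raw 0x78 -> %u kHz\n", (unsigned)type2_max_pixel_clock_khz(0x78));
	return 0;
}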
*/ + sink_cap->max_hdmi_pixel_clock = + max_tmds_clk * 1000; + } + sink_cap->is_dongle_type_one = false; + + } else { + if (is_valid_hdmi_signature == true) { + *dongle = DISPLAY_DONGLE_DP_HDMI_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 1 DP-HDMI passive dongle %dMhz: ", + sink_cap->max_hdmi_pixel_clock / 1000); + } else { + *dongle = DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE; + + CONN_DATA_DETECT(ddc->link, type2_dongle_buf, + sizeof(type2_dongle_buf), + "Type 1 DP-HDMI passive dongle (no signature) %dMhz: ", + sink_cap->max_hdmi_pixel_clock / 1000); + } + sink_cap->is_dongle_type_one = true; + } + + return; +} + +static enum signal_type dp_passive_dongle_detection(struct ddc_service *ddc, + struct display_sink_capability *sink_cap, + struct audio_support *audio_support) +{ + query_dp_dual_mode_adaptor(ddc, sink_cap); + + return decide_signal_from_strap_and_dongle_type(sink_cap->dongle_type, + audio_support); +} + +static void link_disconnect_sink(struct dc_link *link) +{ + if (link->local_sink) { + dc_sink_release(link->local_sink); + link->local_sink = NULL; + } + + link->dpcd_sink_count = 0; + //link->dpcd_caps.dpcd_rev.raw = 0; +} + +static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *link) +{ + dc_sink_release(link->local_sink); + link->local_sink = prev_sink; +} + +static void query_hdcp_capability(enum signal_type signal, struct dc_link *link) +{ + struct hdcp_protection_message msg22; + struct hdcp_protection_message msg14; + + memset(&msg22, 0, sizeof(struct hdcp_protection_message)); + memset(&msg14, 0, sizeof(struct hdcp_protection_message)); + memset(link->hdcp_caps.rx_caps.raw, 0, + sizeof(link->hdcp_caps.rx_caps.raw)); + + if ((link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + link->ddc->transaction_type == + DDC_TRANSACTION_TYPE_I2C_OVER_AUX) || + link->connector_signal == SIGNAL_TYPE_EDP) { + msg22.data = link->hdcp_caps.rx_caps.raw; + msg22.length = sizeof(link->hdcp_caps.rx_caps.raw); + msg22.msg_id = HDCP_MESSAGE_ID_RX_CAPS; + } else { + msg22.data = &link->hdcp_caps.rx_caps.fields.version; + msg22.length = sizeof(link->hdcp_caps.rx_caps.fields.version); + msg22.msg_id = HDCP_MESSAGE_ID_HDCP2VERSION; + } + msg22.version = HDCP_VERSION_22; + msg22.link = HDCP_LINK_PRIMARY; + msg22.max_retries = 5; + dc_process_hdcp_msg(signal, link, &msg22); + + if (signal == SIGNAL_TYPE_DISPLAY_PORT || signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + msg14.data = &link->hdcp_caps.bcaps.raw; + msg14.length = sizeof(link->hdcp_caps.bcaps.raw); + msg14.msg_id = HDCP_MESSAGE_ID_READ_BCAPS; + msg14.version = HDCP_VERSION_14; + msg14.link = HDCP_LINK_PRIMARY; + msg14.max_retries = 5; + + dc_process_hdcp_msg(signal, link, &msg14); + } + +} +static void read_current_link_settings_on_detect(struct dc_link *link) +{ + union lane_count_set lane_count_set = {0}; + uint8_t link_bw_set; + uint8_t link_rate_set; + uint32_t read_dpcd_retry_cnt = 10; + enum dc_status status = DC_ERROR_UNEXPECTED; + int i; + union max_down_spread max_down_spread = {0}; + + // Read DPCD 00101h to find out the number of lanes currently set + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd(link, + DP_LANE_COUNT_SET, + &lane_count_set.raw, + sizeof(lane_count_set)); + /* First DPCD read after VDD ON can fail if the particular board + * does not have HPD pin wired correctly. So if DPCD read fails, + * which it should never happen, retry a few times. Target worst + * case scenario of 80 ms. 
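 * (For reference: read_dpcd_retry_cnt is 10 and the retry path below sleeps 8 ms per attempt, so the worst case works out to roughly 10 x 8 ms = 80 ms.)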
+ */ + if (status == DC_OK) { + link->cur_link_settings.lane_count = + lane_count_set.bits.LANE_COUNT_SET; + break; + } + + msleep(8); + } + + // Read DPCD 00100h to find if standard link rates are set + core_link_read_dpcd(link, DP_LINK_BW_SET, + &link_bw_set, sizeof(link_bw_set)); + + if (link_bw_set == 0) { + if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* If standard link rates are not being used, + * Read DPCD 00115h to find the edp link rate set used + */ + core_link_read_dpcd(link, DP_LINK_RATE_SET, + &link_rate_set, sizeof(link_rate_set)); + + // edp_supported_link_rates_count = 0 for DP + if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + link->cur_link_settings.link_rate = + link->dpcd_caps.edp_supported_link_rates[link_rate_set]; + link->cur_link_settings.link_rate_set = link_rate_set; + link->cur_link_settings.use_link_rate_set = true; + } + } else { + // Link Rate not found. Seamless boot may not work. + ASSERT(false); + } + } else { + link->cur_link_settings.link_rate = link_bw_set; + link->cur_link_settings.use_link_rate_set = false; + } + // Read DPCD 00003h to find the max down spread. + core_link_read_dpcd(link, DP_MAX_DOWNSPREAD, + &max_down_spread.raw, sizeof(max_down_spread)); + link->cur_link_settings.link_spread = + max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; +} + +static bool detect_dp(struct dc_link *link, + struct display_sink_capability *sink_caps, + enum dc_detect_reason reason) +{ + struct audio_support *audio_support = &link->dc->res_pool->audio_support; + + sink_caps->signal = link_detect_sink_signal_type(link, reason); + sink_caps->transaction_type = + get_ddc_transaction_type(sink_caps->signal); + + if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { + sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; + if (!detect_dp_sink_caps(link)) + return false; + + if (is_dp_branch_device(link)) + /* DP SST branch */ + link->type = dc_connection_sst_branch; + } else { + if (link->dc->debug.disable_dp_plus_plus_wa && + link->link_enc->features.flags.bits.IS_UHBR20_CAPABLE) + return false; + + /* DP passive dongles */ + sink_caps->signal = dp_passive_dongle_detection(link->ddc, + sink_caps, + audio_support); + link->dpcd_caps.dongle_type = sink_caps->dongle_type; + link->dpcd_caps.is_dongle_type_one = sink_caps->is_dongle_type_one; + link->dpcd_caps.dpcd_rev.raw = 0; + } + + return true; +} + +static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) +{ + if (old_edid->length != new_edid->length) + return false; + + if (new_edid->length == 0) + return false; + + return (memcmp(old_edid->raw_edid, + new_edid->raw_edid, new_edid->length) == 0); +} + +static bool wait_for_entering_dp_alt_mode(struct dc_link *link) +{ + + /** + * something is terribly wrong if time out is > 200ms. 
(5Hz) + * 500 microseconds * 400 tries gives us 200 ms + **/ + unsigned int sleep_time_in_microseconds = 500; + unsigned int tries_allowed = 400; + bool is_in_alt_mode; + unsigned long long enter_timestamp; + unsigned long long finish_timestamp; + unsigned long long time_taken_in_ns; + int tries_taken; + + DC_LOGGER_INIT(link->ctx->logger); + + /** + * this function will only exist if we are on dcn21 (is_in_alt_mode is a + * function pointer, so checking to see if it is equal to 0 is the same + * as checking to see if it is null) + **/ + if (!link->link_enc->funcs->is_in_alt_mode) + return true; + + is_in_alt_mode = link->link_enc->funcs->is_in_alt_mode(link->link_enc); + DC_LOG_DC("DP Alt mode state on HPD: %d\n", is_in_alt_mode); + + if (is_in_alt_mode) + return true; + + enter_timestamp = dm_get_timestamp(link->ctx); + + for (tries_taken = 0; tries_taken < tries_allowed; tries_taken++) { + udelay(sleep_time_in_microseconds); + /* ask the link if alt mode is enabled, if so return ok */ + if (link->link_enc->funcs->is_in_alt_mode(link->link_enc)) { + finish_timestamp = dm_get_timestamp(link->ctx); + time_taken_in_ns = + dm_get_elapse_time_in_ns(link->ctx, + finish_timestamp, + enter_timestamp); + DC_LOG_WARNING("Alt mode entry finished after %llu ms\n", + div_u64(time_taken_in_ns, 1000000)); + return true; + } + } + finish_timestamp = dm_get_timestamp(link->ctx); + time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, + enter_timestamp); + DC_LOG_WARNING("Alt mode has timed out after %llu ms\n", + div_u64(time_taken_in_ns, 1000000)); + return false; +} + +static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) +{ + /* Apply workaround for tunneled MST on certain USB4 docks. Always use DSC if dock + * reports DSC support. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) + link->wa_flags.dpia_mst_dsc_always_on = true; +} + +static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) +{ + /* Disable the workaround that keeps DSC on for tunneled MST on certain USB4 docks.
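wait_for_entering_dp_alt_mode() above is a plain bounded-polling loop: delay a fixed interval, re-query the encoder, and give up once enough iterations have passed to cover the roughly 200 ms budget (400 tries x 500 us). A generic sketch of the same pattern follows; poll_fn and fake_ready() are hypothetical stand-ins for the is_in_alt_mode() hook, and the iteration count simply stands in for real elapsed time.

#include <stdbool.h>
#include <stdio.h>

/* Generic bounded poll: check poll_fn() up to max_tries times, nominally
 * interval_us microseconds apart.  (Sketch only: a real driver would call
 * udelay() between attempts and read a hardware timestamp for the log.) */
static bool poll_until(bool (*poll_fn)(void *), void *arg,
		       unsigned int interval_us, unsigned int max_tries)
{
	unsigned int i;

	for (i = 0; i < max_tries; i++) {
		/* a driver would udelay(interval_us) here */
		if (poll_fn(arg)) {
			printf("condition met after ~%u us\n", (i + 1) * interval_us);
			return true;
		}
	}
	printf("timed out after ~%u us\n", max_tries * interval_us);
	return false;
}

static bool fake_ready(void *arg)
{
	unsigned int *calls = arg;

	return ++(*calls) >= 3;	/* pretend the condition holds on the third poll */
}

int main(void)
{
	unsigned int calls = 0;

	/* 400 tries x 500 us mirrors the ~200 ms budget used above */
	return poll_until(fake_ready, &calls, 500, 400) ? 0 : 1;
}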
*/ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->wa_flags.dpia_mst_dsc_always_on = false; +} + +static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason) +{ + DC_LOGGER_INIT(link->ctx->logger); + + LINK_INFO("link=%d, mst branch is now Connected\n", + link->link_index); + + link->type = dc_connection_mst_branch; + apply_dpia_mst_dsc_always_on_wa(link); + + dm_helpers_dp_update_branch_info(link->ctx, link); + if (dm_helpers_dp_mst_start_top_mgr(link->ctx, + link, (reason == DETECT_REASON_BOOT || reason == DETECT_REASON_RESUMEFROMS3S4))) { + link_disconnect_sink(link); + } else { + link->type = dc_connection_sst_branch; + } + + return link->type == dc_connection_mst_branch; +} + +bool link_reset_cur_dp_mst_topology(struct dc_link *link) +{ + DC_LOGGER_INIT(link->ctx->logger); + + LINK_INFO("link=%d, mst branch is now Disconnected\n", + link->link_index); + + revert_dpia_mst_dsc_always_on_wa(link); + return dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); +} + +static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc, + enum dc_detect_reason reason) +{ + int i; + bool can_apply_seamless_boot = false; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { + can_apply_seamless_boot = true; + break; + } + } + + return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT; +} + +static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc) +{ + dc_z10_restore(dc); + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); +} + +static void restore_phy_clocks_for_destructive_link_verification(const struct dc *dc) +{ + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); +} + +static void verify_link_capability_destructive(struct dc_link *link, + struct dc_sink *sink, + enum dc_detect_reason reason) +{ + bool should_prepare_phy_clocks = + should_prepare_phy_clocks_for_link_verification(link->dc, reason); + + if (should_prepare_phy_clocks) + prepare_phy_clocks_for_destructive_link_verification(link->dc); + + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + struct dc_link_settings known_limit_link_setting = + dp_get_max_link_cap(link); + link_set_all_streams_dpms_off_for_link(link); + dp_verify_link_cap_with_retries( + link, &known_limit_link_setting, + LINK_TRAINING_MAX_VERIFY_RETRY); + } else { + ASSERT(0); + } + + if (should_prepare_phy_clocks) + restore_phy_clocks_for_destructive_link_verification(link->dc); +} + +static void verify_link_capability_non_destructive(struct dc_link *link) +{ + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + if (dc_is_embedded_signal(link->local_sink->sink_signal) || + link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + /* TODO - should we check link encoder's max link caps here? + * How do we know which link encoder to check from? 
+ */ + link->verified_link_cap = link->reported_link_cap; + else + link->verified_link_cap = dp_get_max_link_cap(link); + } +} + +static bool should_verify_link_capability_destructively(struct dc_link *link, + enum dc_detect_reason reason) +{ + bool destructive = false; + struct dc_link_settings max_link_cap; + bool is_link_enc_unavailable = link->link_enc && + link->dc->res_pool->funcs->link_encs_assign && + !link_enc_cfg_is_link_enc_avail( + link->ctx->dc, + link->link_enc->preferred_engine, + link); + + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + max_link_cap = dp_get_max_link_cap(link); + destructive = true; + + if (link->dc->debug.skip_detection_link_training || + dc_is_embedded_signal(link->local_sink->sink_signal) || + link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + destructive = false; + } else if (link_dp_get_encoding_format(&max_link_cap) == + DP_8b_10b_ENCODING) { + if (link->dpcd_caps.is_mst_capable || + is_link_enc_unavailable) { + destructive = false; + } + } + } + + return destructive; +} + +static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, + enum dc_detect_reason reason) +{ + if (should_verify_link_capability_destructively(link, reason)) + verify_link_capability_destructive(link, sink, reason); + else + verify_link_capability_non_destructive(link); +} + +/* + * detect_link_and_local_sink() - Detect if a sink is attached to a given link + * + * link->local_sink is created or destroyed as needed. + * + * This does not create remote sinks. + */ +static bool detect_link_and_local_sink(struct dc_link *link, + enum dc_detect_reason reason) +{ + struct dc_sink_init_data sink_init_data = { 0 }; + struct display_sink_capability sink_caps = { 0 }; + uint32_t i; + bool converter_disable_audio = false; + struct audio_support *aud_support = &link->dc->res_pool->audio_support; + bool same_edid = false; + enum dc_edid_status edid_status; + struct dc_context *dc_ctx = link->ctx; + struct dc *dc = dc_ctx->dc; + struct dc_sink *sink = NULL; + struct dc_sink *prev_sink = NULL; + struct dpcd_caps prev_dpcd_caps; + enum dc_connection_type new_connection_type = dc_connection_none; + enum dc_connection_type pre_connection_type = link->type; + const uint32_t post_oui_delay = 30; // 30ms + + DC_LOGGER_INIT(link->ctx->logger); + + if (dc_is_virtual_signal(link->connector_signal)) + return false; + + if (((link->connector_signal == SIGNAL_TYPE_LVDS || + link->connector_signal == SIGNAL_TYPE_EDP) && + (!link->dc->config.allow_edp_hotplug_detection)) && + link->local_sink) { + // need to re-write OUI and brightness in resume case + if (link->connector_signal == SIGNAL_TYPE_EDP && + (link->dpcd_sink_ext_caps.bits.oled == 1)) { + dpcd_set_source_specific_data(link); + msleep(post_oui_delay); + set_default_brightness_aux(link); + } + + return true; + } + + if (!link_detect_connection_type(link, &new_connection_type)) { + BREAK_TO_DEBUGGER(); + return false; + } + + prev_sink = link->local_sink; + if (prev_sink) { + dc_sink_retain(prev_sink); + memcpy(&prev_dpcd_caps, &link->dpcd_caps, sizeof(struct dpcd_caps)); + } + + link_disconnect_sink(link); + if (new_connection_type != dc_connection_none) { + link->type = new_connection_type; + link->link_state_valid = false; + + /* From Disconnected-to-Connected.
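The nested conditions in should_verify_link_capability_destructively() above reduce to a fairly small policy; the flat predicate below restates it purely for illustration, with boolean parameters abbreviating the checks the real function performs on the link.

#include <stdbool.h>
#include <stdio.h>

/* Destructive verification (detection-time link training) is attempted only
 * for non-embedded, non-DPIA DP sinks, and even then it is skipped for
 * 8b/10b links that are MST capable or whose link encoder is unavailable. */
static bool should_train_destructively(bool is_dp, bool skip_debug_flag,
				       bool is_embedded, bool is_dpia,
				       bool is_8b_10b, bool is_mst_capable,
				       bool link_enc_unavailable)
{
	if (!is_dp)
		return false;
	if (skip_debug_flag || is_embedded || is_dpia)
		return false;
	if (is_8b_10b && (is_mst_capable || link_enc_unavailable))
		return false;
	return true;
}

int main(void)
{
	/* e.g. an external SST 8b/10b DP sink whose link encoder is available */
	printf("%d\n", should_train_destructively(true, false, false, false,
						  true, false, false));
	return 0;
}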
*/ + switch (link->connector_signal) { + case SIGNAL_TYPE_HDMI_TYPE_A: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + if (aud_support->hdmi_audio_native) + sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; + else + sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + } + + case SIGNAL_TYPE_DVI_SINGLE_LINK: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + } + + case SIGNAL_TYPE_DVI_DUAL_LINK: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; + break; + } + + case SIGNAL_TYPE_LVDS: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_LVDS; + break; + } + + case SIGNAL_TYPE_EDP: { + detect_edp_sink_caps(link); + read_current_link_settings_on_detect(link); + + /* Disable power sequence on MIPI panel + converter + */ + if (dc->config.enable_mipi_converter_optimization && + dc_ctx->dce_version == DCN_VERSION_3_01 && + link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_0022B9 && + memcmp(&link->dpcd_caps.branch_dev_name, DP_SINK_BRANCH_DEV_NAME_7580, + sizeof(link->dpcd_caps.branch_dev_name)) == 0) { + dc->config.edp_no_power_sequencing = true; + + if (!link->dpcd_caps.set_power_state_capable_edp) + link->wa_flags.dp_keep_receiver_powered = true; + } + + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.signal = SIGNAL_TYPE_EDP; + break; + } + + case SIGNAL_TYPE_DISPLAY_PORT: { + + /* wa HPD high coming too early*/ + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { + + /* if alt mode times out, return false */ + if (!wait_for_entering_dp_alt_mode(link)) + return false; + } + + if (!detect_dp(link, &sink_caps, reason)) { + link->type = pre_connection_type; + + if (prev_sink) + dc_sink_release(prev_sink); + return false; + } + + /* Active SST downstream branch device unplug*/ + if (link->type == dc_connection_sst_branch && + link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { + if (prev_sink) + /* Downstream unplug */ + dc_sink_release(prev_sink); + return true; + } + + /* disable audio for non DP to HDMI active sst converter */ + if (link->type == dc_connection_sst_branch && + is_dp_active_dongle(link) && + (link->dpcd_caps.dongle_type != + DISPLAY_DONGLE_DP_HDMI_CONVERTER)) + converter_disable_audio = true; + + /* limited link rate to HBR3 for DPIA until we implement USB4 V2 */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->reported_link_cap.link_rate > LINK_RATE_HIGH3) + link->reported_link_cap.link_rate = LINK_RATE_HIGH3; + break; + } + + default: + DC_ERROR("Invalid connector type! 
signal:%d\n", + link->connector_signal); + if (prev_sink) + dc_sink_release(prev_sink); + return false; + } /* switch() */ + + if (link->dpcd_caps.sink_count.bits.SINK_COUNT) + link->dpcd_sink_count = + link->dpcd_caps.sink_count.bits.SINK_COUNT; + else + link->dpcd_sink_count = 1; + + set_ddc_transaction_type(link->ddc, + sink_caps.transaction_type); + + link->aux_mode = + link_is_in_aux_transaction_mode(link->ddc); + + sink_init_data.link = link; + sink_init_data.sink_signal = sink_caps.signal; + + sink = dc_sink_create(&sink_init_data); + if (!sink) { + DC_ERROR("Failed to create sink!\n"); + if (prev_sink) + dc_sink_release(prev_sink); + return false; + } + + sink->link->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock; + sink->converter_disable_audio = converter_disable_audio; + + /* dc_sink_create returns a new reference */ + link->local_sink = sink; + + edid_status = dm_helpers_read_local_edid(link->ctx, + link, sink); + + switch (edid_status) { + case EDID_BAD_CHECKSUM: + DC_LOG_ERROR("EDID checksum invalid.\n"); + break; + case EDID_PARTIAL_VALID: + DC_LOG_ERROR("Partial EDID valid, abandon invalid blocks.\n"); + break; + case EDID_NO_RESPONSE: + DC_LOG_ERROR("No EDID read.\n"); + /* + * Abort detection for non-DP connectors if we have + * no EDID + * + * DP needs to report as connected if HDP is high + * even if we have no EDID in order to go to + * fail-safe mode + */ + if (dc_is_hdmi_signal(link->connector_signal) || + dc_is_dvi_signal(link->connector_signal)) { + if (prev_sink) + dc_sink_release(prev_sink); + + return false; + } + + if (link->type == dc_connection_sst_branch && + link->dpcd_caps.dongle_type == + DISPLAY_DONGLE_DP_VGA_CONVERTER && + reason == DETECT_REASON_HPDRX) { + /* Abort detection for DP-VGA adapters when EDID + * can't be read and detection reason is VGA-side + * hotplug + */ + if (prev_sink) + dc_sink_release(prev_sink); + link_disconnect_sink(link); + + return true; + } + + break; + default: + break; + } + + // Check if edid is the same + if ((prev_sink) && + (edid_status == EDID_THE_SAME || edid_status == EDID_OK)) + same_edid = is_same_edid(&prev_sink->dc_edid, + &sink->dc_edid); + + if (sink->edid_caps.panel_patch.skip_scdc_overwrite) + link->ctx->dc->debug.hdmi20_disable = true; + + if (sink->edid_caps.panel_patch.remove_sink_ext_caps) + link->dpcd_sink_ext_caps.raw = 0; + + if (dc_is_hdmi_signal(link->connector_signal)) + read_scdc_caps(link->ddc, link->local_sink); + + if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + sink_caps.transaction_type == + DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { + /* + * TODO debug why certain monitors don't like + * two link trainings + */ + query_hdcp_capability(sink->sink_signal, link); + } else { + // If edid is the same, then discard new sink and revert back to original sink + if (same_edid) { + link_disconnect_remap(prev_sink, link); + sink = prev_sink; + prev_sink = NULL; + } + query_hdcp_capability(sink->sink_signal, link); + } + + /* HDMI-DVI Dongle */ + if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A && + !sink->edid_caps.edid_hdmi) + sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + + if (link->local_sink && dc_is_dp_signal(sink_caps.signal)) + dp_trace_init(link); + + /* Connectivity log: detection */ + for (i = 0; i < sink->dc_edid.length / DC_EDID_BLOCK_SIZE; i++) { + CONN_DATA_DETECT(link, + &sink->dc_edid.raw_edid[i * DC_EDID_BLOCK_SIZE], + DC_EDID_BLOCK_SIZE, + "%s: [Block %d] ", sink->edid_caps.display_name, i); + } + + DC_LOG_DETECTION_EDID_PARSER("%s: " + "manufacturer_id = %X, " + 
"product_id = %X, " + "serial_number = %X, " + "manufacture_week = %d, " + "manufacture_year = %d, " + "display_name = %s, " + "speaker_flag = %d, " + "audio_mode_count = %d\n", + __func__, + sink->edid_caps.manufacturer_id, + sink->edid_caps.product_id, + sink->edid_caps.serial_number, + sink->edid_caps.manufacture_week, + sink->edid_caps.manufacture_year, + sink->edid_caps.display_name, + sink->edid_caps.speaker_flags, + sink->edid_caps.audio_mode_count); + + for (i = 0; i < sink->edid_caps.audio_mode_count; i++) { + DC_LOG_DETECTION_EDID_PARSER("%s: mode number = %d, " + "format_code = %d, " + "channel_count = %d, " + "sample_rate = %d, " + "sample_size = %d\n", + __func__, + i, + sink->edid_caps.audio_modes[i].format_code, + sink->edid_caps.audio_modes[i].channel_count, + sink->edid_caps.audio_modes[i].sample_rate, + sink->edid_caps.audio_modes[i].sample_size); + } + + if (link->connector_signal == SIGNAL_TYPE_EDP) { + // Init dc_panel_config by HW config + if (dc_ctx->dc->res_pool->funcs->get_panel_config_defaults) + dc_ctx->dc->res_pool->funcs->get_panel_config_defaults(&link->panel_config); + // Pickup base DM settings + dm_helpers_init_panel_settings(dc_ctx, &link->panel_config, sink); + // Override dc_panel_config if system has specific settings + dm_helpers_override_panel_settings(dc_ctx, &link->panel_config); + + //sink only can use supported link rate table, we are foreced to enable it + if (link->reported_link_cap.link_rate == LINK_RATE_UNKNOWN) + link->panel_config.ilr.optimize_edp_link_rate = true; + if (edp_is_ilr_optimization_enabled(link)) + link->reported_link_cap.link_rate = get_max_link_rate_from_ilr_table(link); + } + + } else { + /* From Connected-to-Disconnected. */ + link->type = dc_connection_none; + sink_caps.signal = SIGNAL_TYPE_NONE; + memset(&link->hdcp_caps, 0, sizeof(struct hdcp_caps)); + /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk + * is not cleared. If we emulate a DP signal on this connection, it thinks + * the dongle is still there and limits the number of modes we can emulate. + * Clear dongle_max_pix_clk on disconnect to fix this + */ + link->dongle_max_pix_clk = 0; + + dc_link_clear_dprx_states(link); + dp_trace_reset(link); + } + + LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n", + link->link_index, sink, + (sink_caps.signal == + SIGNAL_TYPE_NONE ? "Disconnected" : "Connected"), + prev_sink, same_edid); + + if (prev_sink) + dc_sink_release(prev_sink); + + return true; +} + +/* + * link_detect_connection_type() - Determine if there is a sink connected + * + * @type: Returned connection type + * Does not detect downstream devices, such as MST sinks + * or display connected through active dongles + */ +bool link_detect_connection_type(struct dc_link *link, enum dc_connection_type *type) +{ + uint32_t is_hpd_high = 0; + + if (link->connector_signal == SIGNAL_TYPE_LVDS) { + *type = dc_connection_single; + return true; + } + + if (link->connector_signal == SIGNAL_TYPE_EDP) { + /*in case it is not on*/ + if (!link->dc->config.edp_no_power_sequencing) + link->dc->hwss.edp_power_control(link, true); + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + } + + /* Link may not have physical HPD pin. 
*/ + if (link->ep_type != DISPLAY_ENDPOINT_PHY) { + if (link->is_hpd_pending || !dpia_query_hpd_status(link)) + *type = dc_connection_none; + else + *type = dc_connection_single; + + return true; + } + + + if (!query_hpd_status(link, &is_hpd_high)) + goto hpd_gpio_failure; + + if (is_hpd_high) { + *type = dc_connection_single; + /* TODO: need to do the actual detection */ + } else { + *type = dc_connection_none; + if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* eDP is not connected, power down it */ + if (!link->dc->config.edp_no_power_sequencing) + link->dc->hwss.edp_power_control(link, false); + } + } + + return true; + +hpd_gpio_failure: + return false; +} + +bool link_detect(struct dc_link *link, enum dc_detect_reason reason) +{ + bool is_local_sink_detect_success; + bool is_delegated_to_mst_top_mgr = false; + enum dc_connection_type pre_link_type = link->type; + + DC_LOGGER_INIT(link->ctx->logger); + + is_local_sink_detect_success = detect_link_and_local_sink(link, reason); + + if (is_local_sink_detect_success && link->local_sink) + verify_link_capability(link, link->local_sink, reason); + + DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__, + link->link_index, is_local_sink_detect_success, pre_link_type, link->type); + + if (is_local_sink_detect_success && link->local_sink && + dc_is_dp_signal(link->local_sink->sink_signal) && + link->dpcd_caps.is_mst_capable) + is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason); + + if (is_local_sink_detect_success && + pre_link_type == dc_connection_mst_branch && + link->type != dc_connection_mst_branch) + is_delegated_to_mst_top_mgr = link_reset_cur_dp_mst_topology(link); + + return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr; +} + +void link_clear_dprx_states(struct dc_link *link) +{ + memset(&link->dprx_states, 0, sizeof(link->dprx_states)); +} + +bool link_is_hdcp14(struct dc_link *link, enum signal_type signal) +{ + bool ret = false; + + switch (signal) { + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + ret = link->hdcp_caps.bcaps.bits.HDCP_CAPABLE; + break; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + /* HDMI doesn't tell us its HDCP(1.4) capability, so assume to always be capable, + * we can poll for bksv but some displays have an issue with this. Since its so rare + * for a display to not be 1.4 capable, this assumtion is ok + */ + ret = true; + break; + default: + break; + } + return ret; +} + +bool link_is_hdcp22(struct dc_link *link, enum signal_type signal) +{ + bool ret = false; + + switch (signal) { + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + ret = (link->hdcp_caps.bcaps.bits.HDCP_CAPABLE && + link->hdcp_caps.rx_caps.fields.byte0.hdcp_capable && + (link->hdcp_caps.rx_caps.fields.version == 0x2)) ? 1 : 0; + break; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + ret = (link->hdcp_caps.rx_caps.fields.version == 0x4) ? 
1:0; + break; + default: + break; + } + + return ret; +} + +const struct dc_link_status *link_get_status(const struct dc_link *link) +{ + return &link->link_status; +} + + +static bool link_add_remote_sink_helper(struct dc_link *dc_link, struct dc_sink *sink) +{ + if (dc_link->sink_count >= MAX_SINKS_PER_LINK) { + BREAK_TO_DEBUGGER(); + return false; + } + + dc_sink_retain(sink); + + dc_link->remote_sinks[dc_link->sink_count] = sink; + dc_link->sink_count++; + + return true; +} + +struct dc_sink *link_add_remote_sink( + struct dc_link *link, + const uint8_t *edid, + int len, + struct dc_sink_init_data *init_data) +{ + struct dc_sink *dc_sink; + enum dc_edid_status edid_status; + + if (len > DC_MAX_EDID_BUFFER_SIZE) { + dm_error("Max EDID buffer size breached!\n"); + return NULL; + } + + if (!init_data) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + if (!init_data->link) { + BREAK_TO_DEBUGGER(); + return NULL; + } + + dc_sink = dc_sink_create(init_data); + + if (!dc_sink) + return NULL; + + memmove(dc_sink->dc_edid.raw_edid, edid, len); + dc_sink->dc_edid.length = len; + + if (!link_add_remote_sink_helper( + link, + dc_sink)) + goto fail_add_sink; + + edid_status = dm_helpers_parse_edid_caps( + link, + &dc_sink->dc_edid, + &dc_sink->edid_caps); + + /* + * Treat device as no EDID device if EDID + * parsing fails + */ + if (edid_status != EDID_OK && edid_status != EDID_PARTIAL_VALID) { + dc_sink->dc_edid.length = 0; + dm_error("Bad EDID, status%d!\n", edid_status); + } + + return dc_sink; + +fail_add_sink: + dc_sink_release(dc_sink); + return NULL; +} + +void link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink) +{ + int i; + + if (!link->sink_count) { + BREAK_TO_DEBUGGER(); + return; + } + + for (i = 0; i < link->sink_count; i++) { + if (link->remote_sinks[i] == sink) { + dc_sink_release(sink); + link->remote_sinks[i] = NULL; + + /* shrink array to remove empty place */ + while (i < link->sink_count - 1) { + link->remote_sinks[i] = link->remote_sinks[i+1]; + i++; + } + link->remote_sinks[i] = NULL; + link->sink_count--; + return; + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.h b/drivers/gpu/drm/amd/display/dc/link/link_detection.h new file mode 100644 index 0000000000..7da0507872 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.h @@ -0,0 +1,43 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DETECTION_H__ +#define __DC_LINK_DETECTION_H__ +#include "link.h" +bool link_detect(struct dc_link *link, enum dc_detect_reason reason); +bool link_detect_connection_type(struct dc_link *link, + enum dc_connection_type *type); +struct dc_sink *link_add_remote_sink( + struct dc_link *link, + const uint8_t *edid, + int len, + struct dc_sink_init_data *init_data); +void link_remove_remote_sink(struct dc_link *link, struct dc_sink *sink); +bool link_reset_cur_dp_mst_topology(struct dc_link *link); +const struct dc_link_status *link_get_status(const struct dc_link *link); +bool link_is_hdcp14(struct dc_link *link, enum signal_type signal); +bool link_is_hdcp22(struct dc_link *link, enum signal_type signal); +void link_clear_dprx_states(struct dc_link *link); +#endif /* __DC_LINK_DETECTION_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c new file mode 100644 index 0000000000..a96f074762 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -0,0 +1,2541 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file owns the programming sequence of stream's dpms state associated + * with the link and link's enable/disable sequences as result of the stream's + * dpms state change. + * + * TODO - The reason link owns stream's dpms programming sequence is + * because dpms programming sequence is highly dependent on underlying signal + * specific link protocols. This unfortunately causes link to own a portion of + * stream state programming sequence. This creates a gray area where the + * boundary between link and stream is not clearly defined. 
+ */ + +#include "link_dpms.h" +#include "link_hwss.h" +#include "link_validation.h" +#include "accessories/link_fpga.h" +#include "accessories/link_dp_trace.h" +#include "protocols/link_dpcd.h" +#include "protocols/link_ddc.h" +#include "protocols/link_hpd.h" +#include "protocols/link_dp_phy.h" +#include "protocols/link_dp_capability.h" +#include "protocols/link_dp_training.h" +#include "protocols/link_edp_panel_control.h" +#include "protocols/link_dp_dpia_bw.h" + +#include "dm_helpers.h" +#include "link_enc_cfg.h" +#include "resource.h" +#include "dsc.h" +#include "dccg.h" +#include "clk_mgr.h" +#include "atomfirmware.h" +#define DC_LOGGER_INIT(logger) + +#define LINK_INFO(...) \ + DC_LOG_HW_HOTPLUG( \ + __VA_ARGS__) + +#define RETIMER_REDRIVER_INFO(...) \ + DC_LOG_RETIMER_REDRIVER( \ + __VA_ARGS__) +#include "dc/dcn30/dcn30_vpg.h" + +#define MAX_MTP_SLOT_COUNT 64 +#define LINK_TRAINING_ATTEMPTS 4 +#define PEAK_FACTOR_X1000 1006 + +void link_blank_all_dp_displays(struct dc *dc) +{ + unsigned int i; + uint8_t dpcd_power_state = '\0'; + enum dc_status status = DC_ERROR_UNEXPECTED; + + for (i = 0; i < dc->link_count; i++) { + if ((dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) || + (dc->links[i]->priv == NULL) || (dc->links[i]->local_sink == NULL)) + continue; + + /* DP 2.0 spec requires that we read LTTPR caps first */ + dp_retrieve_lttpr_cap(dc->links[i]); + /* if any of the displays are lit up turn them off */ + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) + link_blank_dp_stream(dc->links[i], true); + } + +} + +void link_blank_all_edp_displays(struct dc *dc) +{ + unsigned int i; + uint8_t dpcd_power_state = '\0'; + enum dc_status status = DC_ERROR_UNEXPECTED; + + for (i = 0; i < dc->link_count; i++) { + if ((dc->links[i]->connector_signal != SIGNAL_TYPE_EDP) || + (!dc->links[i]->edp_sink_present)) + continue; + + /* if any of the displays are lit up turn them off */ + status = core_link_read_dpcd(dc->links[i], DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + + if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) + link_blank_dp_stream(dc->links[i], true); + } +} + +void link_blank_dp_stream(struct dc_link *link, bool hw_init) +{ + unsigned int j; + struct dc *dc = link->ctx->dc; + enum signal_type signal = link->connector_signal; + + if ((signal == SIGNAL_TYPE_EDP) || + (signal == SIGNAL_TYPE_DISPLAY_PORT)) { + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + link->link_enc->funcs->get_dig_frontend && + link->link_enc->funcs->is_dig_enabled(link->link_enc)) { + unsigned int fe = link->link_enc->funcs->get_dig_frontend(link->link_enc); + + if (fe != ENGINE_ID_UNKNOWN) + for (j = 0; j < dc->res_pool->stream_enc_count; j++) { + if (fe == dc->res_pool->stream_enc[j]->id) { + dc->res_pool->stream_enc[j]->funcs->dp_blank(link, + dc->res_pool->stream_enc[j]); + break; + } + } + } + + if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) + dpcd_write_rx_power_ctrl(link, false); + } +} + +void link_set_all_streams_dpms_off_for_link(struct dc_link *link) +{ + struct pipe_ctx *pipes[MAX_PIPES]; + struct dc_state *state = link->dc->current_state; + uint8_t count; + int i; + struct dc_stream_update stream_update; + bool dpms_off = true; + struct link_resource link_res = {0}; + + memset(&stream_update, 0, sizeof(stream_update)); + stream_update.dpms_off = &dpms_off; + + link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + + 
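 /* Each OTG master pipe collected above now gets a dpms_off stream update committed for it below. */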
for (i = 0; i < count; i++) { + stream_update.stream = pipes[i]->stream; + dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, + pipes[i]->stream, &stream_update, + state); + } + + /* link can be also enabled by vbios. In this case it is not recorded + * in pipe_ctx. Disable link phy here to make sure it is completely off + */ + dp_disable_link_phy(link, &link_res, link->connector_signal); +} + +void link_resume(struct dc_link *link) +{ + if (link->connector_signal != SIGNAL_TYPE_VIRTUAL) + program_hpd_filter(link); +} + +/* This function returns true if the pipe is used to feed video signal directly + * to the link. + */ +static bool is_master_pipe_for_link(const struct dc_link *link, + const struct pipe_ctx *pipe) +{ + return resource_is_pipe_type(pipe, OTG_MASTER) && + pipe->stream->link == link; +} + +/* + * This function finds all master pipes feeding to a given link with dpms set to + * on in given dc state. + */ +void link_get_master_pipes_with_dpms_on(const struct dc_link *link, + struct dc_state *state, + uint8_t *count, + struct pipe_ctx *pipes[MAX_PIPES]) +{ + int i; + struct pipe_ctx *pipe = NULL; + + *count = 0; + for (i = 0; i < MAX_PIPES; i++) { + pipe = &state->res_ctx.pipe_ctx[i]; + + if (is_master_pipe_for_link(link, pipe) && + pipe->stream->dpms_off == false) { + pipes[(*count)++] = pipe; + } + } +} + +static bool get_ext_hdmi_settings(struct pipe_ctx *pipe_ctx, + enum engine_id eng_id, + struct ext_hdmi_settings *settings) +{ + bool result = false; + int i = 0; + struct integrated_info *integrated_info = + pipe_ctx->stream->ctx->dc_bios->integrated_info; + + if (integrated_info == NULL) + return false; + + /* + * Get retimer settings from sbios for passing SI eye test for DCE11 + * The setting values are varied based on board revision and port id + * Therefore the setting values of each ports is passed by sbios. 
+ */ + + // Check if current bios contains ext Hdmi settings + if (integrated_info->gpu_cap_info & 0x20) { + switch (eng_id) { + case ENGINE_ID_DIGA: + settings->slv_addr = integrated_info->dp0_ext_hdmi_slv_addr; + settings->reg_num = integrated_info->dp0_ext_hdmi_6g_reg_num; + settings->reg_num_6g = integrated_info->dp0_ext_hdmi_6g_reg_num; + memmove(settings->reg_settings, + integrated_info->dp0_ext_hdmi_reg_settings, + sizeof(integrated_info->dp0_ext_hdmi_reg_settings)); + memmove(settings->reg_settings_6g, + integrated_info->dp0_ext_hdmi_6g_reg_settings, + sizeof(integrated_info->dp0_ext_hdmi_6g_reg_settings)); + result = true; + break; + case ENGINE_ID_DIGB: + settings->slv_addr = integrated_info->dp1_ext_hdmi_slv_addr; + settings->reg_num = integrated_info->dp1_ext_hdmi_6g_reg_num; + settings->reg_num_6g = integrated_info->dp1_ext_hdmi_6g_reg_num; + memmove(settings->reg_settings, + integrated_info->dp1_ext_hdmi_reg_settings, + sizeof(integrated_info->dp1_ext_hdmi_reg_settings)); + memmove(settings->reg_settings_6g, + integrated_info->dp1_ext_hdmi_6g_reg_settings, + sizeof(integrated_info->dp1_ext_hdmi_6g_reg_settings)); + result = true; + break; + case ENGINE_ID_DIGC: + settings->slv_addr = integrated_info->dp2_ext_hdmi_slv_addr; + settings->reg_num = integrated_info->dp2_ext_hdmi_6g_reg_num; + settings->reg_num_6g = integrated_info->dp2_ext_hdmi_6g_reg_num; + memmove(settings->reg_settings, + integrated_info->dp2_ext_hdmi_reg_settings, + sizeof(integrated_info->dp2_ext_hdmi_reg_settings)); + memmove(settings->reg_settings_6g, + integrated_info->dp2_ext_hdmi_6g_reg_settings, + sizeof(integrated_info->dp2_ext_hdmi_6g_reg_settings)); + result = true; + break; + case ENGINE_ID_DIGD: + settings->slv_addr = integrated_info->dp3_ext_hdmi_slv_addr; + settings->reg_num = integrated_info->dp3_ext_hdmi_6g_reg_num; + settings->reg_num_6g = integrated_info->dp3_ext_hdmi_6g_reg_num; + memmove(settings->reg_settings, + integrated_info->dp3_ext_hdmi_reg_settings, + sizeof(integrated_info->dp3_ext_hdmi_reg_settings)); + memmove(settings->reg_settings_6g, + integrated_info->dp3_ext_hdmi_6g_reg_settings, + sizeof(integrated_info->dp3_ext_hdmi_6g_reg_settings)); + result = true; + break; + default: + break; + } + + if (result == true) { + // Validate settings from bios integrated info table + if (settings->slv_addr == 0) + return false; + if (settings->reg_num > 9) + return false; + if (settings->reg_num_6g > 3) + return false; + + for (i = 0; i < settings->reg_num; i++) { + if (settings->reg_settings[i].i2c_reg_index > 0x20) + return false; + } + + for (i = 0; i < settings->reg_num_6g; i++) { + if (settings->reg_settings_6g[i].i2c_reg_index > 0x20) + return false; + } + } + } + + return result; +} + +static bool write_i2c(struct pipe_ctx *pipe_ctx, + uint8_t address, uint8_t *buffer, uint32_t length) +{ + struct i2c_command cmd = {0}; + struct i2c_payload payload = {0}; + + memset(&payload, 0, sizeof(payload)); + memset(&cmd, 0, sizeof(cmd)); + + cmd.number_of_payloads = 1; + cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; + cmd.speed = pipe_ctx->stream->ctx->dc->caps.i2c_speed_in_khz; + + payload.address = address; + payload.data = buffer; + payload.length = length; + payload.write = true; + cmd.payloads = &payload; + + if (dm_helpers_submit_i2c(pipe_ctx->stream->ctx, + pipe_ctx->stream->link, &cmd)) + return true; + + return false; +} + +static void write_i2c_retimer_setting( + struct pipe_ctx *pipe_ctx, + bool is_vga_mode, + bool is_over_340mhz, + struct ext_hdmi_settings *settings) +{ + uint8_t 
slave_address = (settings->slv_addr >> 1); + uint8_t buffer[2]; + const uint8_t apply_rx_tx_change = 0x4; + uint8_t offset = 0xA; + uint8_t value = 0; + int i = 0; + bool i2c_success = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + memset(&buffer, 0, sizeof(buffer)); + + /* Start Ext-Hdmi programming*/ + + for (i = 0; i < settings->reg_num; i++) { + /* Apply 3G settings */ + if (settings->reg_settings[i].i2c_reg_index <= 0x20) { + + buffer[0] = settings->reg_settings[i].i2c_reg_index; + buffer[1] = settings->reg_settings[i].i2c_reg_val; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + + if (!i2c_success) + goto i2c_write_fail; + + /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A + * needs to be set to 1 on every 0xA-0xC write. + */ + if (settings->reg_settings[i].i2c_reg_index == 0xA || + settings->reg_settings[i].i2c_reg_index == 0xB || + settings->reg_settings[i].i2c_reg_index == 0xC) { + + /* Query current value from offset 0xA */ + if (settings->reg_settings[i].i2c_reg_index == 0xA) + value = settings->reg_settings[i].i2c_reg_val; + else { + i2c_success = + link_query_ddc_data( + pipe_ctx->stream->link->ddc, + slave_address, &offset, 1, &value, 1); + if (!i2c_success) + goto i2c_write_fail; + } + + buffer[0] = offset; + /* Set APPLY_RX_TX_CHANGE bit to 1 */ + buffer[1] = value | apply_rx_tx_change; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + } + } + } + + /* Apply 3G settings */ + if (is_over_340mhz) { + for (i = 0; i < settings->reg_num_6g; i++) { + /* Apply 3G settings */ + if (settings->reg_settings[i].i2c_reg_index <= 0x20) { + + buffer[0] = settings->reg_settings_6g[i].i2c_reg_index; + buffer[1] = settings->reg_settings_6g[i].i2c_reg_val; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("above 340Mhz: retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + + if (!i2c_success) + goto i2c_write_fail; + + /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A + * needs to be set to 1 on every 0xA-0xC write. 
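Per the DP159 note above, every write to retimer registers 0x0A-0x0C is followed by re-asserting the APPLY_RX_TX_CHANGE bit (0x4) in register 0x0A, done here as an I2C read-modify-write. A tiny stand-alone sketch of just that bit manipulation; only the 0x4 mask and the register numbers come from the surrounding code, the rest is illustrative.

#include <stdint.h>
#include <stdio.h>

#define DEMO_APPLY_RX_TX_CHANGE 0x4	/* same mask the code above ORs in */

/* Given the current value of retimer register 0x0A, return the value to
 * write back so the pending RX/TX changes are latched. */
static uint8_t latch_rx_tx_change(uint8_t current_reg_0a)
{
	return (uint8_t)(current_reg_0a | DEMO_APPLY_RX_TX_CHANGE);
}

int main(void)
{
	uint8_t reg = 0x13;	/* arbitrary example value */

	printf("0x%02X -> 0x%02X\n", (unsigned)reg, (unsigned)latch_rx_tx_change(reg));
	return 0;
}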
+ */ + if (settings->reg_settings_6g[i].i2c_reg_index == 0xA || + settings->reg_settings_6g[i].i2c_reg_index == 0xB || + settings->reg_settings_6g[i].i2c_reg_index == 0xC) { + + /* Query current value from offset 0xA */ + if (settings->reg_settings_6g[i].i2c_reg_index == 0xA) + value = settings->reg_settings_6g[i].i2c_reg_val; + else { + i2c_success = + link_query_ddc_data( + pipe_ctx->stream->link->ddc, + slave_address, &offset, 1, &value, 1); + if (!i2c_success) + goto i2c_write_fail; + } + + buffer[0] = offset; + /* Set APPLY_RX_TX_CHANGE bit to 1 */ + buffer[1] = value | apply_rx_tx_change; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + } + } + } + } + + if (is_vga_mode) { + /* Program additional settings if using 640x480 resolution */ + + /* Write offset 0xFF to 0x01 */ + buffer[0] = 0xff; + buffer[1] = 0x01; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x00 to 0x23 */ + buffer[0] = 0x00; + buffer[1] = 0x23; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0xff to 0x00 */ + buffer[0] = 0xff; + buffer[1] = 0x00; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set retimer failed"); +} + +static void write_i2c_default_retimer_setting( + struct pipe_ctx *pipe_ctx, + bool is_vga_mode, + bool is_over_340mhz) +{ + uint8_t slave_address = (0xBA >> 1); + uint8_t buffer[2]; + bool i2c_success = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + memset(&buffer, 0, sizeof(buffer)); + + /* Program Slave Address for tuning single integrity */ + /* Write offset 0x0A to 0x13 */ + buffer[0] = 0x0A; + buffer[1] = 0x13; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer writes default setting to slave_address = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x0A to 0x17 */ + buffer[0] = 0x0A; + buffer[1] = 0x17; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x0B to 0xDA or 0xD8 */ + buffer[0] = 0x0B; + buffer[1] = is_over_340mhz ? 
0xDA : 0xD8; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x0A to 0x17 */ + buffer[0] = 0x0A; + buffer[1] = 0x17; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x0C to 0x1D or 0x91 */ + buffer[0] = 0x0C; + buffer[1] = is_over_340mhz ? 0x1D : 0x91; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x0A to 0x17 */ + buffer[0] = 0x0A; + buffer[1] = 0x17; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + + if (is_vga_mode) { + /* Program additional settings if using 640x480 resolution */ + + /* Write offset 0xFF to 0x01 */ + buffer[0] = 0xff; + buffer[1] = 0x01; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0x00 to 0x23 */ + buffer[0] = 0x00; + buffer[1] = 0x23; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write to slave_addr = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + + /* Write offset 0xff to 0x00 */ + buffer[0] = 0xff; + buffer[1] = 0x00; + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("retimer write default setting to slave_addr = 0x%x,\ + offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n", + slave_address, buffer[0], buffer[1], i2c_success?1:0); + if (!i2c_success) + goto i2c_write_fail; + } + + return; + +i2c_write_fail: + DC_LOG_DEBUG("Set default retimer failed"); +} + +static void write_i2c_redriver_setting( + struct pipe_ctx *pipe_ctx, + bool is_over_340mhz) +{ + uint8_t slave_address = (0xF0 >> 1); + uint8_t buffer[16]; + bool i2c_success = false; + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + memset(&buffer, 0, sizeof(buffer)); + + // Program Slave Address for tuning single integrity + buffer[3] = 0x4E; + buffer[4] = 0x4E; + buffer[5] = 0x4E; + buffer[6] = is_over_340mhz ? 
0x4E : 0x4A; + + i2c_success = write_i2c(pipe_ctx, slave_address, + buffer, sizeof(buffer)); + RETIMER_REDRIVER_INFO("redriver write 0 to all 16 reg offset expect following:\n\ + \t slave_addr = 0x%x, offset[3] = 0x%x, offset[4] = 0x%x,\ + offset[5] = 0x%x,offset[6] is_over_340mhz = 0x%x,\ + i2c_success = %d\n", + slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0); + + if (!i2c_success) + DC_LOG_DEBUG("Set redriver failed"); +} + +static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) +{ + struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; + struct link_encoder *link_enc = NULL; + struct cp_psp_stream_config config = {0}; + enum dp_panel_mode panel_mode = + dp_get_panel_mode(pipe_ctx->stream->link); + + if (cp_psp == NULL || cp_psp->funcs.update_stream_config == NULL) + return; + + link_enc = link_enc_cfg_get_link_enc(pipe_ctx->stream->link); + ASSERT(link_enc); + if (link_enc == NULL) + return; + + /* otg instance */ + config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; + + /* dig front end */ + config.dig_fe = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst; + + /* stream encoder index */ + config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; + if (dp_is_128b_132b_signal(pipe_ctx)) + config.stream_enc_idx = + pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; + + /* dig back end */ + config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; + + /* link encoder index */ + config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + if (dp_is_128b_132b_signal(pipe_ctx)) + config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; + + /* dio output index is dpia index for DPIA endpoint & dcio index by default */ + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + config.dio_output_idx = pipe_ctx->stream->link->link_id.enum_id - ENUM_ID_1; + else + config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; + + + /* phy index */ + config.phy_idx = resource_transmitter_to_phy_idx( + pipe_ctx->stream->link->dc, link_enc->transmitter); + if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + /* USB4 DPIA doesn't use PHY in our soc, initialize it to 0 */ + config.phy_idx = 0; + + /* stream properties */ + config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; + config.mst_enabled = (pipe_ctx->stream->signal == + SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; + config.dp2_enabled = dp_is_128b_132b_signal(pipe_ctx) ? 1 : 0; + config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? 
+ 1 : 0; + config.dpms_off = dpms_off; + + /* dm stream context */ + config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + + cp_psp->funcs.update_stream_config(cp_psp->handle, &config); +} + +static void set_avmute(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + + if (!dc_is_hdmi_signal(pipe_ctx->stream->signal)) + return; + + dc->hwss.set_avmute(pipe_ctx, enable); +} + +static void enable_mst_on_sink(struct dc_link *link, bool enable) +{ + unsigned char mstmCntl; + + core_link_read_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); + if (enable) + mstmCntl |= DP_MST_EN; + else + mstmCntl &= (~DP_MST_EN); + + core_link_write_dpcd(link, DP_MSTM_CTRL, &mstmCntl, 1); +} + +static void dsc_optc_config_log(struct display_stream_compressor *dsc, + struct dsc_optc_config *config) +{ + uint32_t precision = 1 << 28; + uint32_t bytes_per_pixel_int = config->bytes_per_pixel / precision; + uint32_t bytes_per_pixel_mod = config->bytes_per_pixel % precision; + uint64_t ll_bytes_per_pix_fraq = bytes_per_pixel_mod; + DC_LOGGER_INIT(dsc->ctx->logger); + + /* 7 fractional digits decimal precision for bytes per pixel is enough because DSC + * bits per pixel precision is 1/16th of a pixel, which means bytes per pixel precision is + * 1/16/8 = 1/128 of a byte, or 0.0078125 decimal + */ + ll_bytes_per_pix_fraq *= 10000000; + ll_bytes_per_pix_fraq /= precision; + + DC_LOG_DSC("\tbytes_per_pixel 0x%08x (%d.%07d)", + config->bytes_per_pixel, bytes_per_pixel_int, (uint32_t)ll_bytes_per_pix_fraq); + DC_LOG_DSC("\tis_pixel_format_444 %d", config->is_pixel_format_444); + DC_LOG_DSC("\tslice_width %d", config->slice_width); +} + +static bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + bool result = false; + + if (dc_is_virtual_signal(stream->signal)) + result = true; + else + result = dm_helpers_dp_write_dsc_enable(dc->ctx, stream, enable); + return result; +} + +/* The stream with these settings can be sent (unblanked) only after DSC was enabled on RX first, + * i.e. after dp_enable_dsc_on_rx() had been called + */ +void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct pipe_ctx *odm_pipe; + int opp_cnt = 1; + DC_LOGGER_INIT(dsc->ctx->logger); + + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + opp_cnt++; + + if (enable) { + struct dsc_config dsc_cfg; + struct dsc_optc_config dsc_optc_cfg; + enum optc_dsc_mode optc_dsc_mode; + + /* Enable DSC hw block */ + dsc_cfg.pic_width = (stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? 
true : false; + dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0); + dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; + + dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); + dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { + struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; + + odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); + odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); + } + dsc_cfg.dc_dsc_cfg.num_slices_h *= opp_cnt; + dsc_cfg.pic_width *= opp_cnt; + + optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; + + /* Enable DSC in encoder */ + if (dc_is_dp_signal(stream->signal) && !dp_is_128b_132b_signal(pipe_ctx)) { + DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); + dsc_optc_config_log(dsc, &dsc_optc_cfg); + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, + optc_dsc_mode, + dsc_optc_cfg.bytes_per_pixel, + dsc_optc_cfg.slice_width); + + /* PPS SDP is set elsewhere because it has to be done after DIG FE is connected to DIG BE */ + } + + /* Enable DSC in OPTC */ + DC_LOG_DSC("Setting optc DSC config for tg instance %d:", pipe_ctx->stream_res.tg->inst); + dsc_optc_config_log(dsc, &dsc_optc_cfg); + pipe_ctx->stream_res.tg->funcs->set_dsc_config(pipe_ctx->stream_res.tg, + optc_dsc_mode, + dsc_optc_cfg.bytes_per_pixel, + dsc_optc_cfg.slice_width); + } else { + /* disable DSC in OPTC */ + pipe_ctx->stream_res.tg->funcs->set_dsc_config( + pipe_ctx->stream_res.tg, + OPTC_DSC_DISABLED, 0, 0); + + /* disable DSC in stream encoder */ + if (dc_is_dp_signal(stream->signal)) { + if (dp_is_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + false, + NULL, + true); + else { + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( + pipe_ctx->stream_res.stream_enc, + OPTC_DSC_DISABLED, 0, 0); + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, false, NULL, true); + } + } + + /* disable DSC block */ + pipe_ctx->stream_res.dsc->funcs->dsc_disable(pipe_ctx->stream_res.dsc); + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + odm_pipe->stream_res.dsc->funcs->dsc_disable(odm_pipe->stream_res.dsc); + } +} + +/* + * For dynamic bpp change case, dsc is programmed with MASTER_UPDATE_LOCK enabled; + * hence PPS info packet update need to use frame update instead of immediate update. + * Added parameter immediate_update for this purpose. + * The decision to use frame update is hard-coded in function dp_update_dsc_config(), + * which is the only place where a "false" would be passed in for param immediate_update. + * + * immediate_update is only applicable when DSC is enabled. 
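+ * As a concrete illustration from this file: link_update_dsc_config() passes + * immediate_update = false so the new PPS SDP is latched on a frame boundary, while the + * DSC enable path in link_set_dpms_on() passes immediate_update = true.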
+ */ +bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + struct dc_stream_state *stream = pipe_ctx->stream; + + if (!pipe_ctx->stream->timing.flags.DSC) + return false; + + if (!dsc) + return false; + + DC_LOGGER_INIT(dsc->ctx->logger); + + if (enable) { + struct dsc_config dsc_cfg; + uint8_t dsc_packed_pps[128]; + + memset(&dsc_cfg, 0, sizeof(dsc_cfg)); + memset(dsc_packed_pps, 0, 128); + + /* Enable DSC hw block */ + dsc_cfg.pic_width = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; + dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; + dsc_cfg.pixel_encoding = stream->timing.pixel_encoding; + dsc_cfg.color_depth = stream->timing.display_color_depth; + dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false; + dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg; + + dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); + memcpy(&stream->dsc_packed_pps[0], &dsc_packed_pps[0], sizeof(stream->dsc_packed_pps)); + if (dc_is_dp_signal(stream->signal)) { + DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); + if (dp_is_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + true, + &dsc_packed_pps[0], + immediate_update); + else + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, + true, + &dsc_packed_pps[0], + immediate_update); + } + } else { + /* disable DSC PPS in stream encoder */ + memset(&stream->dsc_packed_pps[0], 0, sizeof(stream->dsc_packed_pps)); + if (dc_is_dp_signal(stream->signal)) { + if (dp_is_128b_132b_signal(pipe_ctx)) + pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.hpo_dp_stream_enc, + false, + NULL, + true); + else + pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( + pipe_ctx->stream_res.stream_enc, false, NULL, true); + } + } + + return true; +} + +bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + bool result = false; + + if (!pipe_ctx->stream->timing.flags.DSC) + goto out; + if (!dsc) + goto out; + + if (enable) { + { + link_set_dsc_on_stream(pipe_ctx, true); + result = true; + } + } else { + dp_set_dsc_on_rx(pipe_ctx, false); + link_set_dsc_on_stream(pipe_ctx, false); + result = true; + } +out: + return result; +} + +bool link_update_dsc_config(struct pipe_ctx *pipe_ctx) +{ + struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc; + + if (!pipe_ctx->stream->timing.flags.DSC) + return false; + if (!dsc) + return false; + + link_set_dsc_on_stream(pipe_ctx, true); + link_set_dsc_pps_packet(pipe_ctx, true, false); + return true; +} + +static void enable_stream_features(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + + if (pipe_ctx->stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) { + struct dc_link *link = stream->link; + union down_spread_ctrl old_downspread; + union down_spread_ctrl new_downspread; + + memset(&old_downspread, 0, sizeof(old_downspread)); + + core_link_read_dpcd(link, DP_DOWNSPREAD_CTRL, + &old_downspread.raw, sizeof(old_downspread)); + + new_downspread.raw = old_downspread.raw; + + new_downspread.bits.IGNORE_MSA_TIMING_PARAM = + 
(stream->ignore_msa_timing_param) ? 1 : 0; + + if (new_downspread.raw != old_downspread.raw) { + core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &new_downspread.raw, sizeof(new_downspread)); + } + + } else { + dm_helpers_mst_enable_stream_features(stream); + } +} + +static void log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp) +{ + const uint32_t VCP_Y_PRECISION = 1000; + uint64_t vcp_x, vcp_y; + DC_LOGGER_INIT(link->ctx->logger); + + // Add 0.5*(1/VCP_Y_PRECISION) to round up to decimal precision + avg_time_slots_per_mtp = dc_fixpt_add( + avg_time_slots_per_mtp, + dc_fixpt_from_fraction( + 1, + 2*VCP_Y_PRECISION)); + + vcp_x = dc_fixpt_floor( + avg_time_slots_per_mtp); + vcp_y = dc_fixpt_floor( + dc_fixpt_mul_int( + dc_fixpt_sub_int( + avg_time_slots_per_mtp, + dc_fixpt_floor( + avg_time_slots_per_mtp)), + VCP_Y_PRECISION)); + + + if (link->type == dc_connection_mst_branch) + DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream " + "X: %llu " + "Y: %llu/%d", + vcp_x, + vcp_y, + VCP_Y_PRECISION); + else + DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream " + "X: %llu " + "Y: %llu/%d", + vcp_x, + vcp_y, + VCP_Y_PRECISION); +} + +static struct fixed31_32 get_pbn_per_slot(struct dc_stream_state *stream) +{ + struct fixed31_32 mbytes_per_sec; + uint32_t link_rate_in_mbytes_per_sec = dp_link_bandwidth_kbps(stream->link, + &stream->link->cur_link_settings); + link_rate_in_mbytes_per_sec /= 8000; /* Kbits to MBytes */ + + mbytes_per_sec = dc_fixpt_from_int(link_rate_in_mbytes_per_sec); + + return dc_fixpt_div_int(mbytes_per_sec, 54); +} + +static struct fixed31_32 get_pbn_from_bw_in_kbps(uint64_t kbps) +{ + struct fixed31_32 peak_kbps; + uint32_t numerator = 0; + uint32_t denominator = 1; + + /* + * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 + * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on + * common multiplier to render an integer PBN for all link rate/lane + * counts combinations + * calculate + * peak_kbps *= (1006/1000) + * peak_kbps *= (64/54) + * peak_kbps *= 8 convert to bytes + */ + + numerator = 64 * PEAK_FACTOR_X1000; + denominator = 54 * 8 * 1000 * 1000; + kbps *= numerator; + peak_kbps = dc_fixpt_from_fraction(kbps, denominator); + + return peak_kbps; +} + +static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) +{ + uint64_t kbps; + enum dc_link_encoding_format link_encoding; + + if (dp_is_128b_132b_signal(pipe_ctx)) + link_encoding = DC_LINK_ENCODING_DP_128b_132b; + else + link_encoding = DC_LINK_ENCODING_DP_8b_10b; + + kbps = dc_bandwidth_in_kbps_from_timing(&pipe_ctx->stream->timing, link_encoding); + return get_pbn_from_bw_in_kbps(kbps); +} + + +// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST) +static void get_lane_status( + struct dc_link *link, + uint32_t lane_count, + union lane_status *status, + union lane_align_status_updated *status_updated) +{ + unsigned int lane; + uint8_t dpcd_buf[3] = {0}; + + if (status == NULL || status_updated == NULL) { + return; + } + + core_link_read_dpcd( + link, + DP_LANE0_1_STATUS, + dpcd_buf, + sizeof(dpcd_buf)); + + for (lane = 0; lane < lane_count; lane++) { + status[lane].raw = dp_get_nibble_at_index(&dpcd_buf[0], lane); + } + + status_updated->raw = dpcd_buf[2]; +} + +static bool poll_for_allocation_change_trigger(struct dc_link *link) +{ + /* + * wait for ACT handled + */ + int i; + const int act_retries = 30; + enum act_return_status result = ACT_FAILED; + union 
payload_table_update_status update_status = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated lane_status_updated; + DC_LOGGER_INIT(link->ctx->logger); + + if (link->aux_access_disabled) + return true; + for (i = 0; i < act_retries; i++) { + get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated); + + if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) || + !dp_is_interlane_aligned(lane_status_updated)) { + DC_LOG_ERROR("SST Update Payload: Link loss occurred while " + "polling for ACT handled."); + result = ACT_LINK_LOST; + break; + } + core_link_read_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1); + + if (update_status.bits.ACT_HANDLED == 1) { + DC_LOG_DP2("SST Update Payload: ACT handled by downstream."); + result = ACT_SUCCESS; + break; + } + + fsleep(5000); + } + + if (result == ACT_FAILED) { + DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, " + "continue on. Something is wrong with the branch."); + } + + return (result == ACT_SUCCESS); +} + +static void update_mst_stream_alloc_table( + struct dc_link *link, + struct stream_encoder *stream_enc, + struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc? + const struct dc_dp_mst_stream_allocation_table *proposed_table) +{ + struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 }; + struct link_mst_stream_allocation *dc_alloc; + + int i; + int j; + + /* if DRM proposed_table has more than one new payload */ + ASSERT(proposed_table->stream_count - + link->mst_stream_alloc_table.stream_count < 2); + + /* copy proposed_table to link, add stream encoder */ + for (i = 0; i < proposed_table->stream_count; i++) { + + for (j = 0; j < link->mst_stream_alloc_table.stream_count; j++) { + dc_alloc = + &link->mst_stream_alloc_table.stream_allocations[j]; + + if (dc_alloc->vcp_id == + proposed_table->stream_allocations[i].vcp_id) { + + work_table[i] = *dc_alloc; + work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count; + break; /* exit j loop */ + } + } + + /* new vcp_id */ + if (j == link->mst_stream_alloc_table.stream_count) { + work_table[i].vcp_id = + proposed_table->stream_allocations[i].vcp_id; + work_table[i].slot_count = + proposed_table->stream_allocations[i].slot_count; + work_table[i].stream_enc = stream_enc; + work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc; + } + } + + /* update link->mst_stream_alloc_table with work_table */ + link->mst_stream_alloc_table.stream_count = + proposed_table->stream_count; + for (i = 0; i < MAX_CONTROLLER_NUM; i++) + link->mst_stream_alloc_table.stream_allocations[i] = + work_table[i]; +} + +static void remove_stream_from_alloc_table( + struct dc_link *link, + struct stream_encoder *dio_stream_enc, + struct hpo_dp_stream_encoder *hpo_dp_stream_enc) +{ + int i = 0; + struct link_mst_stream_allocation_table *table = + &link->mst_stream_alloc_table; + + if (hpo_dp_stream_enc) { + for (; i < table->stream_count; i++) + if (hpo_dp_stream_enc == table->stream_allocations[i].hpo_dp_stream_enc) + break; + } else { + for (; i < table->stream_count; i++) + if (dio_stream_enc == table->stream_allocations[i].stream_enc) + break; + } + + if (i < table->stream_count) { + i++; + for (; i < table->stream_count; i++) + 
table->stream_allocations[i-1] = table->stream_allocations[i]; + memset(&table->stream_allocations[table->stream_count-1], 0, + sizeof(struct link_mst_stream_allocation)); + table->stream_count--; + } +} + +static enum dc_status deallocate_mst_payload_with_temp_drm_wa( + struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct dc_dp_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); + int i; + bool mst_mode = (link->type == dc_connection_mst_branch); + /* adjust for drm changes*/ + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + const struct dc_link_settings empty_link_settings = {0}; + DC_LOGGER_INIT(link->ctx->logger); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &empty_link_settings, + avg_time_slots_per_mtp); + + if (dm_helpers_dp_mst_write_payload_allocation_table( + stream->ctx, + stream, + &proposed_table, + false)) + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); + else + DC_LOG_WARNING("Failed to update" + "MST allocation table for" + "pipe idx:%d\n", + pipe_ctx->pipe_idx); + + DC_LOG_MST("%s" + "stream_count: %d: ", + __func__, + link->mst_stream_alloc_table.stream_count); + + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].hpo_dp_stream_enc: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); + } + + if (link_hwss->ext.update_stream_allocation_table == NULL || + link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { + DC_LOG_DEBUG("Unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } + + link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, + &link->mst_stream_alloc_table); + + if (mst_mode) { + dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + } + + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + false); + + return DC_OK; +} + +static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct dc_dp_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); + int i; + bool mst_mode = (link->type == dc_connection_mst_branch); + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + const struct dc_link_settings empty_link_settings = {0}; + DC_LOGGER_INIT(link->ctx->logger); + + if (link->dc->debug.temp_mst_deallocation_sequence) + return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx); + + /* deallocate_mst_payload is called before disable link. When mode or + * disable/enable monitor, new stream is created which is not in link + * stream[] yet. For this, payload is not allocated yet, so de-alloc + * should not done. 
For new mode set, map_resources will get engine + * for new stream, so stream_enc->id should be validated until here. + */ + + /* slot X.Y */ + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &empty_link_settings, + avg_time_slots_per_mtp); + + if (mst_mode) { + /* when link is in mst mode, reply on mst manager to remove + * payload + */ + if (dm_helpers_dp_mst_write_payload_allocation_table( + stream->ctx, + stream, + &proposed_table, + false)) + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); + else + DC_LOG_WARNING("Failed to update" + "MST allocation table for" + "pipe idx:%d\n", + pipe_ctx->pipe_idx); + } else { + /* when link is no longer in mst mode (mst hub unplugged), + * remove payload with default dc logic + */ + remove_stream_from_alloc_table(link, pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc); + } + + DC_LOG_MST("%s" + "stream_count: %d: ", + __func__, + link->mst_stream_alloc_table.stream_count); + + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].hpo_dp_stream_enc: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); + } + + /* update mst stream allocation table hardware state */ + if (link_hwss->ext.update_stream_allocation_table == NULL || + link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { + DC_LOG_DEBUG("Unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } + + link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, + &link->mst_stream_alloc_table); + + if (mst_mode) { + dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + false); + } + + return DC_OK; +} + +/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table + * because stream_encoder is not exposed to dm + */ +static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct dc_dp_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp; + struct fixed31_32 pbn; + struct fixed31_32 pbn_per_slot; + int i; + enum act_return_status ret; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + DC_LOGGER_INIT(link->ctx->logger); + + /* enable_link_dp_mst already check link->enabled_stream_count + * and stream is in link->stream[]. This is called during set mode, + * stream_enc is available. 
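+ * + * The allocation sequence below: (1) request an updated payload table from the MST + * manager and mirror it into link->mst_stream_alloc_table, (2) program the DP source TX + * allocation table, (3) poll for ACT and send the ALLOCATE_PAYLOAD sideband message, and + * (4) program slot X.Y (avg_time_slots_per_mtp) for this stream.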
+ */ + + /* get calculate VC payload for stream: stream_alloc */ + if (dm_helpers_dp_mst_write_payload_allocation_table( + stream->ctx, + stream, + &proposed_table, + true)) + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); + else + DC_LOG_WARNING("Failed to update" + "MST allocation table for" + "pipe idx:%d\n", + pipe_ctx->pipe_idx); + + DC_LOG_MST("%s " + "stream_count: %d: \n ", + __func__, + link->mst_stream_alloc_table.stream_count); + + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].hpo_dp_stream_enc: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); + } + + ASSERT(proposed_table.stream_count > 0); + + /* program DP source TX for payload */ + if (link_hwss->ext.update_stream_allocation_table == NULL || + link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } + + link_hwss->ext.update_stream_allocation_table(link, + &pipe_ctx->link_res, + &link->mst_stream_alloc_table); + + /* send down message */ + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + + if (ret != ACT_LINK_LOST) { + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + } + + /* slot X.Y for only current stream */ + pbn_per_slot = get_pbn_per_slot(stream); + if (pbn_per_slot.value == 0) { + DC_LOG_ERROR("Failure: pbn_per_slot==0 not allowed. 
Cannot continue, returning DC_UNSUPPORTED_VALUE.\n"); + return DC_UNSUPPORTED_VALUE; + } + pbn = get_pbn_from_timing(pipe_ctx); + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + + log_vcp_x_y(link, avg_time_slots_per_mtp); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, + avg_time_slots_per_mtp); + + return DC_OK; +} + +struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( + const struct dc_stream_state *stream, + const struct dc_link *link) +{ + struct fixed31_32 link_bw_effective = + dc_fixpt_from_int( + dp_link_bandwidth_kbps(link, &link->cur_link_settings)); + struct fixed31_32 timeslot_bw_effective = + dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT); + struct fixed31_32 timing_bw = + dc_fixpt_from_int( + dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(link))); + struct fixed31_32 avg_time_slots_per_mtp = + dc_fixpt_div(timing_bw, timeslot_bw_effective); + + return avg_time_slots_per_mtp; +} + + +static bool write_128b_132b_sst_payload_allocation_table( + const struct dc_stream_state *stream, + struct dc_link *link, + struct link_mst_stream_allocation_table *proposed_table, + bool allocate) +{ + const uint8_t vc_id = 1; /// VC ID always 1 for SST + const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST + bool result = false; + uint8_t req_slot_count = 0; + struct fixed31_32 avg_time_slots_per_mtp = { 0 }; + union payload_table_update_status update_status = { 0 }; + const uint32_t max_retries = 30; + uint32_t retries = 0; + DC_LOGGER_INIT(link->ctx->logger); + + if (allocate) { + avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link); + req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp); + /// Validation should filter out modes that exceed link BW + ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); + if (req_slot_count > MAX_MTP_SLOT_COUNT) + return false; + } else { + /// Leave req_slot_count = 0 if allocate is false. 
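+ /// In that case the DPCD writes below program a time slot count of 0, + /// which deallocates the SST payload.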
+ } + + proposed_table->stream_count = 1; /// Always 1 stream for SST + proposed_table->stream_allocations[0].slot_count = req_slot_count; + proposed_table->stream_allocations[0].vcp_id = vc_id; + + if (link->aux_access_disabled) + return true; + + /// Write DPCD 2C0 = 1 to start updating + update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1; + core_link_write_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1); + + /// Program the changes in DPCD 1C0 - 1C2 + ASSERT(vc_id == 1); + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_SET, + &vc_id, + 1); + + ASSERT(start_time_slot == 0); + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_START_TIME_SLOT, + &start_time_slot, + 1); + + core_link_write_dpcd( + link, + DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT, + &req_slot_count, + 1); + + /// Poll till DPCD 2C0 read 1 + /// Try for at least 150ms (30 retries, with 5ms delay after each attempt) + + while (retries < max_retries) { + if (core_link_read_dpcd( + link, + DP_PAYLOAD_TABLE_UPDATE_STATUS, + &update_status.raw, + 1) == DC_OK) { + if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) { + DC_LOG_DP2("SST Update Payload: downstream payload table updated."); + result = true; + break; + } + } else { + union dpcd_rev dpcdRev; + + if (core_link_read_dpcd( + link, + DP_DPCD_REV, + &dpcdRev.raw, + 1) != DC_OK) { + DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision " + "of sink while polling payload table " + "updated status bit."); + break; + } + } + retries++; + fsleep(5000); + } + + if (!result && retries == max_retries) { + DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, " + "continue on. Something is wrong with the branch."); + // TODO - DP2.0 Payload: Read and log the payload table from downstream branch + } + + return result; +} + +/* + * Payload allocation/deallocation for SST introduced in DP2.0 + */ +static enum dc_status update_sst_payload(struct pipe_ctx *pipe_ctx, + bool allocate) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct link_mst_stream_allocation_table proposed_table = {0}; + struct fixed31_32 avg_time_slots_per_mtp; + const struct dc_link_settings empty_link_settings = {0}; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + DC_LOGGER_INIT(link->ctx->logger); + + /* slot X.Y for SST payload deallocate */ + if (!allocate) { + avg_time_slots_per_mtp = dc_fixpt_from_int(0); + + log_vcp_x_y(link, avg_time_slots_per_mtp); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, + avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &empty_link_settings, + avg_time_slots_per_mtp); + } + + /* calculate VC payload and update branch with new payload allocation table*/ + if (!write_128b_132b_sst_payload_allocation_table( + stream, + link, + &proposed_table, + allocate)) { + DC_LOG_ERROR("SST Update Payload: Failed to update " + "allocation table for " + "pipe idx: %d\n", + pipe_ctx->pipe_idx); + return DC_FAIL_DP_PAYLOAD_ALLOCATION; + } + + proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; + + ASSERT(proposed_table.stream_count == 1); + + //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id + DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p " + "vcp_id: %d " + "slot_count: %d\n", + (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc, + 
proposed_table.stream_allocations[0].vcp_id, + proposed_table.stream_allocations[0].slot_count); + + /* program DP source TX for payload */ + link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, + &proposed_table); + + /* poll for ACT handled */ + if (!poll_for_allocation_change_trigger(link)) { + // Failures will result in blackscreen and errors logged + BREAK_TO_DEBUGGER(); + } + + /* slot X.Y for SST payload allocate */ + if (allocate && link_dp_get_encoding_format(&link->cur_link_settings) == + DP_128b_132b_ENCODING) { + avg_time_slots_per_mtp = link_calculate_sst_avg_time_slots_per_mtp(stream, link); + + log_vcp_x_y(link, avg_time_slots_per_mtp); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, + avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, + avg_time_slots_per_mtp); + } + + /* Always return DC_OK. + * If part of sequence fails, log failure(s) and show blackscreen + */ + return DC_OK; +} + +enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct fixed31_32 avg_time_slots_per_mtp; + struct fixed31_32 pbn; + struct fixed31_32 pbn_per_slot; + struct dc_dp_mst_stream_allocation_table proposed_table = {0}; + uint8_t i; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + DC_LOGGER_INIT(link->ctx->logger); + + /* decrease throttled vcp size */ + pbn_per_slot = get_pbn_per_slot(stream); + pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, + avg_time_slots_per_mtp); + + /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + + /* notify immediate branch device table update */ + if (dm_helpers_dp_mst_write_payload_allocation_table( + stream->ctx, + stream, + &proposed_table, + true)) { + /* update mst stream allocation table software state */ + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); + } else { + DC_LOG_WARNING("Failed to update" + "MST allocation table for" + "pipe idx:%d\n", + pipe_ctx->pipe_idx); + } + + DC_LOG_MST("%s " + "stream_count: %d: \n ", + __func__, + link->mst_stream_alloc_table.stream_count); + + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].hpo_dp_stream_enc: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); + } + + ASSERT(proposed_table.stream_count > 0); + + /* update mst stream allocation table hardware state */ + if (link_hwss->ext.update_stream_allocation_table == NULL || + link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { + DC_LOG_ERROR("Failure: unknown encoding 
format\n"); + return DC_ERROR_UNEXPECTED; + } + + link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, + &link->mst_stream_alloc_table); + + /* poll for immediate branch device ACT handled */ + dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + + return DC_OK; +} + +enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct fixed31_32 avg_time_slots_per_mtp; + struct fixed31_32 pbn; + struct fixed31_32 pbn_per_slot; + struct dc_dp_mst_stream_allocation_table proposed_table = {0}; + uint8_t i; + enum act_return_status ret; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + DC_LOGGER_INIT(link->ctx->logger); + + /* notify immediate branch device table update */ + if (dm_helpers_dp_mst_write_payload_allocation_table( + stream->ctx, + stream, + &proposed_table, + true)) { + /* update mst stream allocation table software state */ + update_mst_stream_alloc_table( + link, + pipe_ctx->stream_res.stream_enc, + pipe_ctx->stream_res.hpo_dp_stream_enc, + &proposed_table); + } + + DC_LOG_MST("%s " + "stream_count: %d: \n ", + __func__, + link->mst_stream_alloc_table.stream_count); + + for (i = 0; i < MAX_CONTROLLER_NUM; i++) { + DC_LOG_MST("stream_enc[%d]: %p " + "stream[%d].hpo_dp_stream_enc: %p " + "stream[%d].vcp_id: %d " + "stream[%d].slot_count: %d\n", + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, + i, + (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc, + i, + link->mst_stream_alloc_table.stream_allocations[i].vcp_id, + i, + link->mst_stream_alloc_table.stream_allocations[i].slot_count); + } + + ASSERT(proposed_table.stream_count > 0); + + /* update mst stream allocation table hardware state */ + if (link_hwss->ext.update_stream_allocation_table == NULL || + link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) { + DC_LOG_ERROR("Failure: unknown encoding format\n"); + return DC_ERROR_UNEXPECTED; + } + + link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res, + &link->mst_stream_alloc_table); + + /* poll for immediate branch device ACT handled */ + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( + stream->ctx, + stream); + + if (ret != ACT_LINK_LOST) { + /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + } + + /* increase throttled vcp size */ + pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); + pbn_per_slot = get_pbn_per_slot(stream); + avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); + + if (link_hwss->ext.set_throttled_vcp_size) + link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->ext.set_hblank_min_symbol_width) + link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, + avg_time_slots_per_mtp); + + return DC_OK; +} + +static void disable_link_dp(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc_link_settings link_settings = link->cur_link_settings; + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST && + link->mst_stream_alloc_table.stream_count > 0) + /* disable MST link only when last vc payload is deallocated */ + return; + + dp_disable_link_phy(link, link_res, signal); + + if (link->connector_signal == SIGNAL_TYPE_EDP) { + if 
(!link->skip_implict_edp_power_control) + link->dc->hwss.edp_power_control(link, false); + } + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + /* set the sink to SST mode after disabling the link */ + enable_mst_on_sink(link, false); + + if (link_dp_get_encoding_format(&link_settings) == + DP_8b_10b_ENCODING) { + dp_set_fec_enable(link, false); + dp_set_fec_ready(link, link_res, false); + } +} + +static void disable_link(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + if (dc_is_dp_signal(signal)) { + disable_link_dp(link, link_res, signal); + } else if (signal != SIGNAL_TYPE_VIRTUAL) { + link->dc->hwss.disable_link_output(link, link_res, signal); + } + + if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + /* MST disable link only when no stream use the link */ + if (link->mst_stream_alloc_table.stream_count <= 0) + link->link_status.link_active = false; + } else { + link->link_status.link_active = false; + } +} + +static void enable_link_hdmi(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + enum dc_color_depth display_color_depth; + enum engine_id eng_id; + struct ext_hdmi_settings settings = {0}; + bool is_over_340mhz = false; + bool is_vga_mode = (stream->timing.h_addressable == 640) + && (stream->timing.v_addressable == 480); + struct dc *dc = pipe_ctx->stream->ctx->dc; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + + if (stream->phy_pix_clk == 0) + stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; + if (stream->phy_pix_clk > 340000) + is_over_340mhz = true; + + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { + unsigned short masked_chip_caps = pipe_ctx->stream->link->chip_caps & + EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; + if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { + /* DP159, Retimer settings */ + eng_id = pipe_ctx->stream_res.stream_enc->id; + + if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) { + write_i2c_retimer_setting(pipe_ctx, + is_vga_mode, is_over_340mhz, &settings); + } else { + write_i2c_default_retimer_setting(pipe_ctx, + is_vga_mode, is_over_340mhz); + } + } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { + /* PI3EQX1204, Redriver settings */ + write_i2c_redriver_setting(pipe_ctx, is_over_340mhz); + } + } + + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) + write_scdc_data( + stream->link->ddc, + stream->phy_pix_clk, + stream->timing.flags.LTE_340MCSC_SCRAMBLE); + + memset(&stream->link->cur_link_settings, 0, + sizeof(struct dc_link_settings)); + + display_color_depth = stream->timing.display_color_depth; + if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR422) + display_color_depth = COLOR_DEPTH_888; + + /* We need to enable stream encoder for TMDS first to apply 1/4 TMDS + * character clock in case that beyond 340MHz. 
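+ * (Above 340MHz the HDMI 2.0 1/40 TMDS bit clock ratio applies, i.e. the TMDS clock is + * 1/4 of the character rate, so the stream encoder is set up before the TMDS link + * output is enabled below.)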
+ */ + if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) + link_hwss->setup_stream_encoder(pipe_ctx); + + dc->hwss.enable_tmds_link_output( + link, + &pipe_ctx->link_res, + pipe_ctx->stream->signal, + pipe_ctx->clock_source->id, + display_color_depth, + stream->phy_pix_clk); + + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) + read_scdc_data(link->ddc); +} + +static enum dc_status enable_link_dp(struct dc_state *state, + struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + enum dc_status status; + bool skip_video_pattern; + struct dc_link *link = stream->link; + const struct dc_link_settings *link_settings = + &pipe_ctx->link_config.dp_link_settings; + bool fec_enable; + int i; + bool apply_seamless_boot_optimization = false; + uint32_t bl_oled_enable_delay = 50; // in ms + uint32_t post_oui_delay = 30; // 30ms + /* Reduce link bandwidth between failed link training attempts. */ + bool do_fallback = false; + int lt_attempts = LINK_TRAINING_ATTEMPTS; + + // Increase retry count if attempting DP1.x on FIXED_VS link + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) + lt_attempts = 10; + + // check for seamless boot + for (i = 0; i < state->stream_count; i++) { + if (state->streams[i]->apply_seamless_boot_optimization) { + apply_seamless_boot_optimization = true; + break; + } + } + + /* + * If the link is DP-over-USB4 do the following: + * - Train with fallback when enabling DPIA link. Conventional links are + * trained with fallback during sink detection. + * - Allocate only what the stream needs for bw in Gbps. Inform the CM + * in case stream needs more or less bw from what has been allocated + * earlier at plug time. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + do_fallback = true; + } + + /* + * Temporary w/a to get DP2.0 link rates to work with SST. + * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved. 
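+ * The w/a below sets DP_MST_EN in DP_MSTM_CTRL on the sink via enable_mst_on_sink() + * when the link uses 128b/132b encoding, the signal is SST DisplayPort and the + * debug option set_mst_en_for_sst is enabled.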
+ */ + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING && + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + link->dc->debug.set_mst_en_for_sst) { + enable_mst_on_sink(link, true); + } + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { + /*in case it is not on*/ + if (!link->dc->config.edp_no_power_sequencing) + link->dc->hwss.edp_power_control(link, true); + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + } + + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */ + } else { + pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = + link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; + if (state->clk_mgr && !apply_seamless_boot_optimization) + state->clk_mgr->funcs->update_clocks(state->clk_mgr, + state, false); + } + + // during mode switch we do DP_SET_POWER off then on, and OUI is lost + dpcd_set_source_specific_data(link); + if (link->dpcd_sink_ext_caps.raw != 0) { + post_oui_delay += link->panel_config.pps.extra_post_OUI_ms; + msleep(post_oui_delay); + } + + // similarly, mode switch can cause loss of cable ID + dpcd_write_cable_id_to_dprx(link); + + skip_video_pattern = true; + + if (link_settings->link_rate == LINK_RATE_LOW) + skip_video_pattern = false; + + if (perform_link_training_with_retries(link_settings, + skip_video_pattern, + lt_attempts, + pipe_ctx, + pipe_ctx->stream->signal, + do_fallback)) { + status = DC_OK; + } else { + status = DC_FAIL_DP_LINK_TRAINING; + } + + if (link->preferred_training_settings.fec_enable) + fec_enable = *link->preferred_training_settings.fec_enable; + else + fec_enable = true; + + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) + dp_set_fec_enable(link, fec_enable); + + // during mode set we do DP_SET_POWER off then on, aux writes are lost + if (link->dpcd_sink_ext_caps.bits.oled == 1 || + link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 || + link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) { + set_default_brightness_aux(link); + if (link->dpcd_sink_ext_caps.bits.oled == 1) + msleep(bl_oled_enable_delay); + edp_backlight_enable_aux(link, true); + } + + return status; +} + +static enum dc_status enable_link_edp( + struct dc_state *state, + struct pipe_ctx *pipe_ctx) +{ + return enable_link_dp(state, pipe_ctx); +} + +static void enable_link_lvds(struct pipe_ctx *pipe_ctx) +{ + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + struct dc *dc = stream->ctx->dc; + + if (stream->phy_pix_clk == 0) + stream->phy_pix_clk = stream->timing.pix_clk_100hz / 10; + + memset(&stream->link->cur_link_settings, 0, + sizeof(struct dc_link_settings)); + dc->hwss.enable_lvds_link_output( + link, + &pipe_ctx->link_res, + pipe_ctx->clock_source->id, + stream->phy_pix_clk); + +} + +static enum dc_status enable_link_dp_mst( + struct dc_state *state, + struct pipe_ctx *pipe_ctx) +{ + struct dc_link *link = pipe_ctx->stream->link; + unsigned char mstm_cntl; + + /* sink signal type after MST branch is MST. Multiple MST sinks + * share one link. Link DP PHY is enable or training only once. 
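+ * If the link is already active, return early; otherwise clear any stale payload + * table, wait for pending down replies, enable MST on the sink and then run the + * common DP enable sequence.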
+ */ + if (link->link_status.link_active) + return DC_OK; + + /* clear payload table */ + core_link_read_dpcd(link, DP_MSTM_CTRL, &mstm_cntl, 1); + if (mstm_cntl & DP_MST_EN) + dm_helpers_dp_mst_clear_payload_allocation_table(link->ctx, link); + + /* to make sure the pending down rep can be processed + * before enabling the link + */ + dm_helpers_dp_mst_poll_pending_down_reply(link->ctx, link); + + /* set the sink to MST mode before enabling the link */ + enable_mst_on_sink(link, true); + + return enable_link_dp(state, pipe_ctx); +} + +static enum dc_status enable_link( + struct dc_state *state, + struct pipe_ctx *pipe_ctx) +{ + enum dc_status status = DC_ERROR_UNEXPECTED; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + + /* There's some scenarios where driver is unloaded with display + * still enabled. When driver is reloaded, it may cause a display + * to not light up if there is a mismatch between old and new + * link settings. Need to call disable first before enabling at + * new link settings. + */ + if (link->link_status.link_active) + disable_link(link, &pipe_ctx->link_res, pipe_ctx->stream->signal); + + switch (pipe_ctx->stream->signal) { + case SIGNAL_TYPE_DISPLAY_PORT: + status = enable_link_dp(state, pipe_ctx); + break; + case SIGNAL_TYPE_EDP: + status = enable_link_edp(state, pipe_ctx); + break; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + status = enable_link_dp_mst(state, pipe_ctx); + msleep(200); + break; + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + enable_link_hdmi(pipe_ctx); + status = DC_OK; + break; + case SIGNAL_TYPE_LVDS: + enable_link_lvds(pipe_ctx); + status = DC_OK; + break; + case SIGNAL_TYPE_VIRTUAL: + status = DC_OK; + break; + default: + break; + } + + if (status == DC_OK) { + pipe_ctx->stream->link->link_status.link_active = true; + } + + return status; +} + +void link_set_dpms_off(struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->sink->link; + struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; + + ASSERT(is_master_pipe_for_link(link, pipe_ctx)); + + if (dp_is_128b_132b_signal(pipe_ctx)) + vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; + + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + if (pipe_ctx->stream->sink) { + if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { + DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, + pipe_ctx->stream->sink->edid_caps.display_name, + pipe_ctx->stream->signal); + } + } + + if (dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + + if (!pipe_ctx->stream->sink->edid_caps.panel_patch.skip_avmute) { + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) + set_avmute(pipe_ctx, true); + } + + dc->hwss.disable_audio_stream(pipe_ctx); + + update_psp_stream_config(pipe_ctx, true); + dc->hwss.blank_stream(pipe_ctx); + + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + deallocate_mst_payload(pipe_ctx); + else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + dp_is_128b_132b_signal(pipe_ctx)) + update_sst_payload(pipe_ctx, false); + + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { + struct ext_hdmi_settings settings = {0}; + enum engine_id eng_id = pipe_ctx->stream_res.stream_enc->id; + + unsigned short masked_chip_caps = link->chip_caps & + EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK; + //Need to inform that 
sink is going to use legacy HDMI mode. + write_scdc_data( + link->ddc, + 165000,//vbios only handles 165Mhz. + false); + if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_TISN65DP159RSBT) { + /* DP159, Retimer settings */ + if (get_ext_hdmi_settings(pipe_ctx, eng_id, &settings)) + write_i2c_retimer_setting(pipe_ctx, + false, false, &settings); + else + write_i2c_default_retimer_setting(pipe_ctx, + false, false); + } else if (masked_chip_caps == EXT_DISPLAY_PATH_CAPS__HDMI20_PI3EQX1204) { + /* PI3EQX1204, Redriver settings */ + write_i2c_redriver_setting(pipe_ctx, false); + } + } + + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + !dp_is_128b_132b_signal(pipe_ctx)) { + + /* In DP1.x SST mode, our encoder will go to TPS1 + * when link is on but stream is off. + * Disabling link before stream will avoid exposing TPS1 pattern + * during the disable sequence as it will confuse some receivers + * state machine. + * In DP2 or MST mode, our encoder will stay video active + */ + disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); + dc->hwss.disable_stream(pipe_ctx); + } else { + dc->hwss.disable_stream(pipe_ctx); + disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); + } + + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + link_set_dsc_enable(pipe_ctx, false); + } + if (dp_is_128b_132b_signal(pipe_ctx)) { + if (pipe_ctx->stream_res.tg->funcs->set_out_mux) + pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); + } + + if (vpg && vpg->funcs->vpg_powerdown) + vpg->funcs->vpg_powerdown(vpg); +} + +void link_set_dpms_on( + struct dc_state *state, + struct pipe_ctx *pipe_ctx) +{ + struct dc *dc = pipe_ctx->stream->ctx->dc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->sink->link; + enum dc_status status; + struct link_encoder *link_enc; + enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; + struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + bool apply_edp_fast_boot_optimization = + pipe_ctx->stream->apply_edp_fast_boot_optimization; + + ASSERT(is_master_pipe_for_link(link, pipe_ctx)); + + if (dp_is_128b_132b_signal(pipe_ctx)) + vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; + + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + + if (pipe_ctx->stream->sink) { + if (pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + pipe_ctx->stream->sink->sink_signal != SIGNAL_TYPE_NONE) { + DC_LOG_DC("%s pipe_ctx dispname=%s signal=%x\n", __func__, + pipe_ctx->stream->sink->edid_caps.display_name, + pipe_ctx->stream->signal); + } + } + + if (dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + if (!dc_is_virtual_signal(pipe_ctx->stream->signal) + && !dp_is_128b_132b_signal(pipe_ctx)) { + if (link_enc) + link_enc->funcs->setup( + link_enc, + pipe_ctx->stream->signal); + } + + pipe_ctx->stream->link->link_state_valid = true; + + if (pipe_ctx->stream_res.tg->funcs->set_out_mux) { + if (dp_is_128b_132b_signal(pipe_ctx)) + otg_out_dest = OUT_MUX_HPO_DP; + else + otg_out_dest = OUT_MUX_DIO; + pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); + } + + link_hwss->setup_stream_attribute(pipe_ctx); + + pipe_ctx->stream->apply_edp_fast_boot_optimization = false; + + // Enable VPG before building infoframe + if (vpg && 
vpg->funcs->vpg_poweron) + vpg->funcs->vpg_poweron(vpg); + + resource_build_info_frame(pipe_ctx); + dc->hwss.update_info_frame(pipe_ctx); + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME); + + /* Do not touch link on seamless boot optimization. */ + if (pipe_ctx->stream->apply_seamless_boot_optimization) { + pipe_ctx->stream->dpms_off = false; + + /* Still enable stream features & audio on seamless boot for DP external displays */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT) { + enable_stream_features(pipe_ctx); + dc->hwss.enable_audio_stream(pipe_ctx); + } + + update_psp_stream_config(pipe_ctx, false); + return; + } + + /* eDP lit up by bios already, no need to enable again. */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + apply_edp_fast_boot_optimization && + !pipe_ctx->stream->timing.flags.DSC && + !pipe_ctx->next_odm_pipe) { + pipe_ctx->stream->dpms_off = false; + update_psp_stream_config(pipe_ctx, false); + return; + } + + if (pipe_ctx->stream->dpms_off) + return; + + /* Have to setup DSC before DIG FE and BE are connected (which happens before the + * link training). This is to make sure the bandwidth sent to DIG BE won't be + * bigger than what the link and/or DIG BE can handle. VBID[6]/CompressedStream_flag + * will be automatically set at a later time when the video is enabled + * (DP_VID_STREAM_EN = 1). + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) + link_set_dsc_enable(pipe_ctx, true); + + } + + status = enable_link(state, pipe_ctx); + + if (status != DC_OK) { + DC_LOG_WARNING("enabling link %u failed: %d\n", + pipe_ctx->stream->link->link_index, + status); + + /* Abort stream enable *unless* the failure was due to + * DP link training - some DP monitors will recover and + * show the stream anyway. But MST displays can't proceed + * without link training. + */ + if (status != DC_FAIL_DP_LINK_TRAINING || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (false == stream->link->link_status.link_active) + disable_link(stream->link, &pipe_ctx->link_res, + pipe_ctx->stream->signal); + BREAK_TO_DEBUGGER(); + return; + } + } + + /* turn off otg test pattern if enable */ + if (pipe_ctx->stream_res.tg->funcs->set_test_pattern) + pipe_ctx->stream_res.tg->funcs->set_test_pattern(pipe_ctx->stream_res.tg, + CONTROLLER_DP_TEST_PATTERN_VIDEOMODE, + COLOR_DEPTH_UNDEFINED); + + /* This second call is needed to reconfigure the DIG + * as a workaround for the incorrect value being applied + * from transmitter control. 
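+ * (The first link_enc->funcs->setup() call is made earlier in this function, + * before enable_link() and link training.)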
+ */ + if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || + dp_is_128b_132b_signal(pipe_ctx))) { + if (link_enc) + link_enc->funcs->setup( + link_enc, + pipe_ctx->stream->signal); + } + + dc->hwss.enable_stream(pipe_ctx); + + /* Set DPS PPS SDP (AKA "info frames") */ + if (pipe_ctx->stream->timing.flags.DSC) { + if (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal)) { + dp_set_dsc_on_rx(pipe_ctx, true); + link_set_dsc_pps_packet(pipe_ctx, true, true); + } + } + + if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + allocate_mst_payload(pipe_ctx); + else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && + dp_is_128b_132b_signal(pipe_ctx)) + update_sst_payload(pipe_ctx, true); + + dc->hwss.unblank_stream(pipe_ctx, + &pipe_ctx->stream->link->cur_link_settings); + + if (stream->sink_patches.delay_ignore_msa > 0) + msleep(stream->sink_patches.delay_ignore_msa); + + if (dc_is_dp_signal(pipe_ctx->stream->signal)) + enable_stream_features(pipe_ctx); + update_psp_stream_config(pipe_ctx, false); + + dc->hwss.enable_audio_stream(pipe_ctx); + + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { + set_avmute(pipe_ctx, false); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.h b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h new file mode 100644 index 0000000000..9398f9c166 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.h @@ -0,0 +1,53 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DPMS_H__ +#define __DC_LINK_DPMS_H__ + +#include "link.h" +void link_set_dpms_on( + struct dc_state *state, + struct pipe_ctx *pipe_ctx); +void link_set_dpms_off(struct pipe_ctx *pipe_ctx); +void link_resume(struct dc_link *link); +void link_blank_all_dp_displays(struct dc *dc); +void link_blank_all_edp_displays(struct dc *dc); +void link_blank_dp_stream(struct dc_link *link, bool hw_init); +void link_set_all_streams_dpms_off_for_link(struct dc_link *link); +void link_get_master_pipes_with_dpms_on(const struct dc_link *link, + struct dc_state *state, + uint8_t *count, + struct pipe_ctx *pipes[MAX_PIPES]); +enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); +enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); +bool link_set_dsc_pps_packet(struct pipe_ctx *pipe_ctx, + bool enable, bool immediate_update); +struct fixed31_32 link_calculate_sst_avg_time_slots_per_mtp( + const struct dc_stream_state *stream, + const struct dc_link *link); +void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); +bool link_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); +bool link_update_dsc_config(struct pipe_ctx *pipe_ctx); +#endif /* __DC_LINK_DPMS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c new file mode 100644 index 0000000000..2c366866f5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -0,0 +1,867 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file owns the creation/destruction of link structure. 
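The creation/destruction responsibility described in the file policy comment is exposed through link_create() and link_destroy() near the end of this file. As a rough caller-side sketch (not part of the patch; the dc, ctx and index arguments are assumed to be supplied by whoever owns the adapter), constructing a phy-backed link looks like this:

static struct dc_link *example_create_phy_link(struct dc *dc,
		struct dc_context *ctx,
		uint32_t connector_index,
		uint32_t link_index)
{
	/* field usage mirrors construct_phy()/link_construct() below */
	struct link_init_data init_params = {
		.ctx = ctx,
		.dc = dc,
		.connector_index = connector_index,
		.link_index = link_index,
		.is_dpia_link = false,
	};

	return link_create(&init_params); /* NULL on alloc/construct failure */
}

The matching teardown is link_destroy(&link), which frees the object and NULLs the caller's pointer.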
+ */ +#include "link_factory.h" +#include "link_detection.h" +#include "link_resource.h" +#include "link_validation.h" +#include "link_dpms.h" +#include "accessories/link_dp_cts.h" +#include "accessories/link_dp_trace.h" +#include "accessories/link_fpga.h" +#include "protocols/link_ddc.h" +#include "protocols/link_dp_capability.h" +#include "protocols/link_dp_dpia_bw.h" +#include "protocols/link_dp_dpia.h" +#include "protocols/link_dp_irq_handler.h" +#include "protocols/link_dp_phy.h" +#include "protocols/link_dp_training.h" +#include "protocols/link_edp_panel_control.h" +#include "protocols/link_hpd.h" +#include "gpio_service_interface.h" +#include "atomfirmware.h" + +#define DC_LOGGER_INIT(logger) + +#define LINK_INFO(...) \ + DC_LOG_HW_HOTPLUG( \ + __VA_ARGS__) + +/* link factory owns the creation/destruction of link structures. */ +static void construct_link_service_factory(struct link_service *link_srv) +{ + + link_srv->create_link = link_create; + link_srv->destroy_link = link_destroy; +} + +/* link_detection manages link detection states and receiver states by using + * various link protocols. It also provides helper functions to interpret + * certain capabilities or status based on the states it manages or retrieve + * them directly from connected receivers. + */ +static void construct_link_service_detection(struct link_service *link_srv) +{ + link_srv->detect_link = link_detect; + link_srv->detect_connection_type = link_detect_connection_type; + link_srv->add_remote_sink = link_add_remote_sink; + link_srv->remove_remote_sink = link_remove_remote_sink; + link_srv->get_hpd_state = link_get_hpd_state; + link_srv->get_hpd_gpio = link_get_hpd_gpio; + link_srv->enable_hpd = link_enable_hpd; + link_srv->disable_hpd = link_disable_hpd; + link_srv->enable_hpd_filter = link_enable_hpd_filter; + link_srv->reset_cur_dp_mst_topology = link_reset_cur_dp_mst_topology; + link_srv->get_status = link_get_status; + link_srv->is_hdcp1x_supported = link_is_hdcp14; + link_srv->is_hdcp2x_supported = link_is_hdcp22; + link_srv->clear_dprx_states = link_clear_dprx_states; +} + +/* link resource implements accessors to link resource. */ +static void construct_link_service_resource(struct link_service *link_srv) +{ + link_srv->get_cur_res_map = link_get_cur_res_map; + link_srv->restore_res_map = link_restore_res_map; + link_srv->get_cur_link_res = link_get_cur_link_res; +} + +/* link validation owns timing validation against various link limitations. (ex. + * link bandwidth, receiver capability or our hardware capability) It also + * provides helper functions exposing bandwidth formulas used in validation. + */ +static void construct_link_service_validation(struct link_service *link_srv) +{ + link_srv->validate_mode_timing = link_validate_mode_timing; + link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps; + link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth; +} + +/* link dpms owns the programming sequence of stream's dpms state associated + * with the link and link's enable/disable sequences as result of the stream's + * dpms state change. 
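As a caller-side illustration (a sketch only, not part of the patch), the two dpms hooks wired up in the constructor just below are the entry points a pipe owner calls to light up or shut down a stream:

static void example_toggle_stream_dpms(struct link_service *link_srv,
		struct dc_state *state,
		struct pipe_ctx *pipe_ctx,
		bool enable)
{
	/* same signatures as link_set_dpms_on()/link_set_dpms_off() above */
	if (enable)
		link_srv->set_dpms_on(state, pipe_ctx);
	else
		link_srv->set_dpms_off(pipe_ctx);
}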
+ */ +static void construct_link_service_dpms(struct link_service *link_srv) +{ + link_srv->set_dpms_on = link_set_dpms_on; + link_srv->set_dpms_off = link_set_dpms_off; + link_srv->resume = link_resume; + link_srv->blank_all_dp_displays = link_blank_all_dp_displays; + link_srv->blank_all_edp_displays = link_blank_all_edp_displays; + link_srv->blank_dp_stream = link_blank_dp_stream; + link_srv->increase_mst_payload = link_increase_mst_payload; + link_srv->reduce_mst_payload = link_reduce_mst_payload; + link_srv->set_dsc_on_stream = link_set_dsc_on_stream; + link_srv->set_dsc_enable = link_set_dsc_enable; + link_srv->update_dsc_config = link_update_dsc_config; +} + +/* link ddc implements generic display communication protocols such as i2c, aux + * and scdc. It should not contain any specific applications of these + * protocols such as display capability query, detection, or handshaking such as + * link training. + */ +static void construct_link_service_ddc(struct link_service *link_srv) +{ + link_srv->create_ddc_service = link_create_ddc_service; + link_srv->destroy_ddc_service = link_destroy_ddc_service; + link_srv->query_ddc_data = link_query_ddc_data; + link_srv->aux_transfer_raw = link_aux_transfer_raw; + link_srv->configure_fixed_vs_pe_retimer = link_configure_fixed_vs_pe_retimer; + link_srv->aux_transfer_with_retries_no_mutex = + link_aux_transfer_with_retries_no_mutex; + link_srv->is_in_aux_transaction_mode = link_is_in_aux_transaction_mode; + link_srv->get_aux_defer_delay = link_get_aux_defer_delay; +} + +/* link dp capability implements dp specific link capability retrieval sequence. + * It is responsible for retrieving, parsing, overriding, deciding capability + * obtained from dp link. Link capability consists of encoders, DPRXs, cables, + * retimers, usb and all other possible backend capabilities. + */ +static void construct_link_service_dp_capability(struct link_service *link_srv) +{ + link_srv->dp_is_sink_present = dp_is_sink_present; + link_srv->dp_is_fec_supported = dp_is_fec_supported; + link_srv->dp_is_128b_132b_signal = dp_is_128b_132b_signal; + link_srv->dp_get_max_link_enc_cap = dp_get_max_link_enc_cap; + link_srv->dp_get_verified_link_cap = dp_get_verified_link_cap; + link_srv->dp_get_encoding_format = link_dp_get_encoding_format; + link_srv->dp_should_enable_fec = dp_should_enable_fec; + link_srv->dp_decide_link_settings = link_decide_link_settings; + link_srv->mst_decide_link_encoding_format = + mst_decide_link_encoding_format; + link_srv->edp_decide_link_settings = edp_decide_link_settings; + link_srv->bw_kbps_from_raw_frl_link_rate_data = + link_bw_kbps_from_raw_frl_link_rate_data; + link_srv->dp_overwrite_extended_receiver_cap = + dp_overwrite_extended_receiver_cap; + link_srv->dp_decide_lttpr_mode = dp_decide_lttpr_mode; +} + +/* link dp phy/dpia implements basic dp phy/dpia functionality such as + * enable/disable output and set lane/drive settings. It is responsible for + * maintaining and update software state representing current phy/dpia status + * such as current link settings. 
+ */ +static void construct_link_service_dp_phy_or_dpia(struct link_service *link_srv) +{ + link_srv->dpia_handle_usb4_bandwidth_allocation_for_link = + dpia_handle_usb4_bandwidth_allocation_for_link; + link_srv->dpia_handle_bw_alloc_response = dpia_handle_bw_alloc_response; + link_srv->dp_set_drive_settings = dp_set_drive_settings; + link_srv->dpcd_write_rx_power_ctrl = dpcd_write_rx_power_ctrl; +} + +/* link dp irq handler implements DP HPD short pulse handling sequence according + * to DP specifications + */ +static void construct_link_service_dp_irq_handler(struct link_service *link_srv) +{ + link_srv->dp_parse_link_loss_status = dp_parse_link_loss_status; + link_srv->dp_should_allow_hpd_rx_irq = dp_should_allow_hpd_rx_irq; + link_srv->dp_handle_link_loss = dp_handle_link_loss; + link_srv->dp_read_hpd_rx_irq_data = dp_read_hpd_rx_irq_data; + link_srv->dp_handle_hpd_rx_irq = dp_handle_hpd_rx_irq; +} + +/* link edp panel control implements retrieval and configuration of eDP panel + * features such as PSR and ABM and it also manages specs defined eDP panel + * power sequences. + */ +static void construct_link_service_edp_panel_control(struct link_service *link_srv) +{ + link_srv->edp_panel_backlight_power_on = edp_panel_backlight_power_on; + link_srv->edp_get_backlight_level = edp_get_backlight_level; + link_srv->edp_get_backlight_level_nits = edp_get_backlight_level_nits; + link_srv->edp_set_backlight_level = edp_set_backlight_level; + link_srv->edp_set_backlight_level_nits = edp_set_backlight_level_nits; + link_srv->edp_get_target_backlight_pwm = edp_get_target_backlight_pwm; + link_srv->edp_get_psr_state = edp_get_psr_state; + link_srv->edp_set_psr_allow_active = edp_set_psr_allow_active; + link_srv->edp_setup_psr = edp_setup_psr; + link_srv->edp_set_sink_vtotal_in_psr_active = + edp_set_sink_vtotal_in_psr_active; + link_srv->edp_get_psr_residency = edp_get_psr_residency; + + link_srv->edp_get_replay_state = edp_get_replay_state; + link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active; + link_srv->edp_setup_replay = edp_setup_replay; + link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal; + link_srv->edp_replay_residency = edp_replay_residency; + + link_srv->edp_wait_for_t12 = edp_wait_for_t12; + link_srv->edp_is_ilr_optimization_required = + edp_is_ilr_optimization_required; + link_srv->edp_backlight_enable_aux = edp_backlight_enable_aux; + link_srv->edp_add_delay_for_T9 = edp_add_delay_for_T9; + link_srv->edp_receiver_ready_T9 = edp_receiver_ready_T9; + link_srv->edp_receiver_ready_T7 = edp_receiver_ready_T7; + link_srv->edp_power_alpm_dpcd_enable = edp_power_alpm_dpcd_enable; + link_srv->edp_set_panel_power = edp_set_panel_power; +} + +/* link dp cts implements dp compliance test automation protocols and manual + * testing interfaces for debugging and certification purpose. 
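The aggregate service object that these sub-category constructors populate is obtained and released through link_create_link_service() and link_destroy_link_service() further down in this file. A minimal lifetime sketch (not part of the patch):

static void example_link_service_lifetime(void)
{
	struct link_service *link_srv = link_create_link_service();

	if (!link_srv)
		return;

	/* ... call through the vtable entries (detect_link, set_dpms_on, ...) ... */

	link_destroy_link_service(&link_srv); /* the helper NULLs the pointer */
}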
+ */ +static void construct_link_service_dp_cts(struct link_service *link_srv) +{ + link_srv->dp_handle_automated_test = dp_handle_automated_test; + link_srv->dp_set_test_pattern = dp_set_test_pattern; + link_srv->dp_set_preferred_link_settings = + dp_set_preferred_link_settings; + link_srv->dp_set_preferred_training_settings = + dp_set_preferred_training_settings; +} + +/* link dp trace implements tracing interfaces for tracking major dp sequences + * including execution status and timestamps + */ +static void construct_link_service_dp_trace(struct link_service *link_srv) +{ + link_srv->dp_trace_is_initialized = dp_trace_is_initialized; + link_srv->dp_trace_set_is_logged_flag = dp_trace_set_is_logged_flag; + link_srv->dp_trace_is_logged = dp_trace_is_logged; + link_srv->dp_trace_get_lt_end_timestamp = dp_trace_get_lt_end_timestamp; + link_srv->dp_trace_get_lt_counts = dp_trace_get_lt_counts; + link_srv->dp_trace_get_link_loss_count = dp_trace_get_link_loss_count; + link_srv->dp_trace_set_edp_power_timestamp = + dp_trace_set_edp_power_timestamp; + link_srv->dp_trace_get_edp_poweron_timestamp = + dp_trace_get_edp_poweron_timestamp; + link_srv->dp_trace_get_edp_poweroff_timestamp = + dp_trace_get_edp_poweroff_timestamp; + link_srv->dp_trace_source_sequence = dp_trace_source_sequence; +} + +static void construct_link_service(struct link_service *link_srv) +{ + /* All link service functions should fall under some sub categories. + * If a new function doesn't perfectly fall under an existing sub + * category, it must be that you are either adding a whole new aspect of + * responsibility to link service or something doesn't belong to link + * service. In that case please contact the arch owner to arrange a + * design review meeting. + */ + construct_link_service_factory(link_srv); + construct_link_service_detection(link_srv); + construct_link_service_resource(link_srv); + construct_link_service_validation(link_srv); + construct_link_service_dpms(link_srv); + construct_link_service_ddc(link_srv); + construct_link_service_dp_capability(link_srv); + construct_link_service_dp_phy_or_dpia(link_srv); + construct_link_service_dp_irq_handler(link_srv); + construct_link_service_edp_panel_control(link_srv); + construct_link_service_dp_cts(link_srv); + construct_link_service_dp_trace(link_srv); +} + +struct link_service *link_create_link_service(void) +{ + struct link_service *link_srv = kzalloc(sizeof(*link_srv), GFP_KERNEL); + + if (link_srv == NULL) + goto fail; + + construct_link_service(link_srv); + + return link_srv; +fail: + return NULL; +} + +void link_destroy_link_service(struct link_service **link_srv) +{ + kfree(*link_srv); + *link_srv = NULL; +} + +static enum transmitter translate_encoder_to_transmitter( + struct graphics_object_id encoder) +{ + switch (encoder.id) { + case ENCODER_ID_INTERNAL_UNIPHY: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_UNIPHY_A; + case ENUM_ID_2: + return TRANSMITTER_UNIPHY_B; + default: + return TRANSMITTER_UNKNOWN; + } + break; + case ENCODER_ID_INTERNAL_UNIPHY1: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_UNIPHY_C; + case ENUM_ID_2: + return TRANSMITTER_UNIPHY_D; + default: + return TRANSMITTER_UNKNOWN; + } + break; + case ENCODER_ID_INTERNAL_UNIPHY2: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_UNIPHY_E; + case ENUM_ID_2: + return TRANSMITTER_UNIPHY_F; + default: + return TRANSMITTER_UNKNOWN; + } + break; + case ENCODER_ID_INTERNAL_UNIPHY3: + switch (encoder.enum_id) { + case ENUM_ID_1: + 
return TRANSMITTER_UNIPHY_G; + default: + return TRANSMITTER_UNKNOWN; + } + break; + case ENCODER_ID_EXTERNAL_NUTMEG: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_NUTMEG_CRT; + default: + return TRANSMITTER_UNKNOWN; + } + break; + case ENCODER_ID_EXTERNAL_TRAVIS: + switch (encoder.enum_id) { + case ENUM_ID_1: + return TRANSMITTER_TRAVIS_CRT; + case ENUM_ID_2: + return TRANSMITTER_TRAVIS_LCD; + default: + return TRANSMITTER_UNKNOWN; + } + break; + default: + return TRANSMITTER_UNKNOWN; + } +} + +static uint8_t translate_dig_inst_to_pwrseq_inst(struct dc_link *link) +{ + uint8_t pwrseq_inst = 0xF; + + switch (link->eng_id) { + case ENGINE_ID_DIGA: + pwrseq_inst = 0; + break; + case ENGINE_ID_DIGB: + pwrseq_inst = 1; + break; + default: + DC_LOG_WARNING("Unsupported pwrseq engine id: %d!\n", link->eng_id); + ASSERT(false); + break; + } + + return pwrseq_inst; +} + + +static void link_destruct(struct dc_link *link) +{ + int i; + + if (link->hpd_gpio) { + dal_gpio_destroy_irq(&link->hpd_gpio); + link->hpd_gpio = NULL; + } + + if (link->ddc) + link_destroy_ddc_service(&link->ddc); + + if (link->panel_cntl) + link->panel_cntl->funcs->destroy(&link->panel_cntl); + + if (link->link_enc) { + /* Update link encoder resource tracking variables. These are used for + * the dynamic assignment of link encoders to streams. Virtual links + * are not assigned encoder resources on creation. + */ + if (link->link_id.id != CONNECTOR_ID_VIRTUAL) { + link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = NULL; + link->dc->res_pool->dig_link_enc_count--; + } + link->link_enc->funcs->destroy(&link->link_enc); + } + + if (link->local_sink) + dc_sink_release(link->local_sink); + + for (i = 0; i < link->sink_count; ++i) + dc_sink_release(link->remote_sinks[i]); +} + +static enum channel_id get_ddc_line(struct dc_link *link) +{ + struct ddc *ddc; + enum channel_id channel; + + channel = CHANNEL_ID_UNKNOWN; + + ddc = get_ddc_pin(link->ddc); + + if (ddc) { + switch (dal_ddc_get_line(ddc)) { + case GPIO_DDC_LINE_DDC1: + channel = CHANNEL_ID_DDC1; + break; + case GPIO_DDC_LINE_DDC2: + channel = CHANNEL_ID_DDC2; + break; + case GPIO_DDC_LINE_DDC3: + channel = CHANNEL_ID_DDC3; + break; + case GPIO_DDC_LINE_DDC4: + channel = CHANNEL_ID_DDC4; + break; + case GPIO_DDC_LINE_DDC5: + channel = CHANNEL_ID_DDC5; + break; + case GPIO_DDC_LINE_DDC6: + channel = CHANNEL_ID_DDC6; + break; + case GPIO_DDC_LINE_DDC_VGA: + channel = CHANNEL_ID_DDC_VGA; + break; + case GPIO_DDC_LINE_I2C_PAD: + channel = CHANNEL_ID_I2C_PAD; + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + } + + return channel; +} + +static bool construct_phy(struct dc_link *link, + const struct link_init_data *init_params) +{ + uint8_t i; + struct ddc_service_init_data ddc_service_init_data = { 0 }; + struct dc_context *dc_ctx = init_params->ctx; + struct encoder_init_data enc_init_data = { 0 }; + struct panel_cntl_init_data panel_cntl_init_data = { 0 }; + struct integrated_info info = { 0 }; + struct dc_bios *bios = init_params->dc->ctx->dc_bios; + const struct dc_vbios_funcs *bp_funcs = bios->funcs; + struct bp_disp_connector_caps_info disp_connect_caps_info = { 0 }; + + DC_LOGGER_INIT(dc_ctx->logger); + + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; + link->link_status.dpcd_caps = &link->dpcd_caps; + + link->dc = init_params->dc; + link->ctx = dc_ctx; + link->link_index = init_params->link_index; + + memset(&link->preferred_training_settings, 0, + sizeof(struct 
dc_link_training_overrides)); + memset(&link->preferred_link_setting, 0, + sizeof(struct dc_link_settings)); + + link->link_id = + bios->funcs->get_connector_id(bios, init_params->connector_index); + + link->ep_type = DISPLAY_ENDPOINT_PHY; + + DC_LOG_DC("BIOS object table - link_id: %d", link->link_id.id); + + if (bios->funcs->get_disp_connector_caps_info) { + bios->funcs->get_disp_connector_caps_info(bios, link->link_id, &disp_connect_caps_info); + link->is_internal_display = disp_connect_caps_info.INTERNAL_DISPLAY; + DC_LOG_DC("BIOS object table - is_internal_display: %d", link->is_internal_display); + } + + if (link->link_id.type != OBJECT_TYPE_CONNECTOR) { + dm_output_to_console("%s: Invalid Connector ObjectID from Adapter Service for connector index:%d! type %d expected %d\n", + __func__, init_params->connector_index, + link->link_id.type, OBJECT_TYPE_CONNECTOR); + goto create_fail; + } + + if (link->dc->res_pool->funcs->link_init) + link->dc->res_pool->funcs->link_init(link); + + link->hpd_gpio = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + + if (link->hpd_gpio) { + dal_gpio_open(link->hpd_gpio, GPIO_MODE_INTERRUPT); + dal_gpio_unlock_pin(link->hpd_gpio); + link->irq_source_hpd = dal_irq_get_source(link->hpd_gpio); + + DC_LOG_DC("BIOS object table - hpd_gpio id: %d", link->hpd_gpio->id); + DC_LOG_DC("BIOS object table - hpd_gpio en: %d", link->hpd_gpio->en); + } + + switch (link->link_id.id) { + case CONNECTOR_ID_HDMI_TYPE_A: + link->connector_signal = SIGNAL_TYPE_HDMI_TYPE_A; + + break; + case CONNECTOR_ID_SINGLE_LINK_DVID: + case CONNECTOR_ID_SINGLE_LINK_DVII: + link->connector_signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + case CONNECTOR_ID_DUAL_LINK_DVID: + case CONNECTOR_ID_DUAL_LINK_DVII: + link->connector_signal = SIGNAL_TYPE_DVI_DUAL_LINK; + break; + case CONNECTOR_ID_DISPLAY_PORT: + case CONNECTOR_ID_USBC: + link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; + + if (link->hpd_gpio) + link->irq_source_hpd_rx = + dal_irq_get_rx_source(link->hpd_gpio); + + break; + case CONNECTOR_ID_EDP: + link->connector_signal = SIGNAL_TYPE_EDP; + + if (link->hpd_gpio) { + if (!link->dc->config.allow_edp_hotplug_detection) + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + + switch (link->dc->config.allow_edp_hotplug_detection) { + case HPD_EN_FOR_ALL_EDP: + link->irq_source_hpd_rx = + dal_irq_get_rx_source(link->hpd_gpio); + break; + case HPD_EN_FOR_PRIMARY_EDP_ONLY: + if (link->link_index == 0) + link->irq_source_hpd_rx = + dal_irq_get_rx_source(link->hpd_gpio); + else + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + break; + case HPD_EN_FOR_SECONDARY_EDP_ONLY: + if (link->link_index == 1) + link->irq_source_hpd_rx = + dal_irq_get_rx_source(link->hpd_gpio); + else + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + break; + default: + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + break; + } + } + + break; + case CONNECTOR_ID_LVDS: + link->connector_signal = SIGNAL_TYPE_LVDS; + break; + default: + DC_LOG_WARNING("Unsupported Connector type:%d!\n", + link->link_id.id); + goto create_fail; + } + + LINK_INFO("Connector[%d] description: signal: %s\n", + init_params->connector_index, + signal_type_to_string(link->connector_signal)); + + ddc_service_init_data.ctx = link->ctx; + ddc_service_init_data.id = link->link_id; + ddc_service_init_data.link = link; + link->ddc = link_create_ddc_service(&ddc_service_init_data); + + if (!link->ddc) { + DC_ERROR("Failed to create ddc_service!\n"); + goto ddc_create_fail; + } + + if (!link->ddc->ddc_pin) { + 
DC_ERROR("Failed to get I2C info for connector!\n"); + goto ddc_create_fail; + } + + link->ddc_hw_inst = + dal_ddc_get_line(get_ddc_pin(link->ddc)); + + enc_init_data.ctx = dc_ctx; + bp_funcs->get_src_obj(dc_ctx->dc_bios, link->link_id, 0, + &enc_init_data.encoder); + enc_init_data.connector = link->link_id; + enc_init_data.channel = get_ddc_line(link); + enc_init_data.hpd_source = get_hpd_line(link); + + link->hpd_src = enc_init_data.hpd_source; + + enc_init_data.transmitter = + translate_encoder_to_transmitter(enc_init_data.encoder); + link->link_enc = + link->dc->res_pool->funcs->link_enc_create(dc_ctx, &enc_init_data); + + DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); + DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); + + if (!link->link_enc) { + DC_ERROR("Failed to create link encoder!\n"); + goto link_enc_create_fail; + } + + /* Update link encoder tracking variables. These are used for the dynamic + * assignment of link encoders to streams. + */ + link->eng_id = link->link_enc->preferred_engine; + link->dc->res_pool->link_encoders[link->eng_id - ENGINE_ID_DIGA] = link->link_enc; + link->dc->res_pool->dig_link_enc_count++; + + link->link_enc_hw_inst = link->link_enc->transmitter; + + if (link->dc->res_pool->funcs->panel_cntl_create && + (link->link_id.id == CONNECTOR_ID_EDP || + link->link_id.id == CONNECTOR_ID_LVDS)) { + panel_cntl_init_data.ctx = dc_ctx; + panel_cntl_init_data.inst = panel_cntl_init_data.ctx->dc_edp_id_count; + panel_cntl_init_data.pwrseq_inst = translate_dig_inst_to_pwrseq_inst(link); + link->panel_cntl = + link->dc->res_pool->funcs->panel_cntl_create( + &panel_cntl_init_data); + panel_cntl_init_data.ctx->dc_edp_id_count++; + + if (link->panel_cntl == NULL) { + DC_ERROR("Failed to create link panel_cntl!\n"); + goto panel_cntl_create_fail; + } + } + for (i = 0; i < 4; i++) { + if (bp_funcs->get_device_tag(dc_ctx->dc_bios, + link->link_id, i, + &link->device_tag) != BP_RESULT_OK) { + DC_ERROR("Failed to find device tag!\n"); + goto device_tag_fail; + } + + /* Look for device tag that matches connector signal, + * CRT for rgb, LCD for other supported signal tyes + */ + if (!bp_funcs->is_device_id_supported(dc_ctx->dc_bios, + link->device_tag.dev_id)) + continue; + if (link->device_tag.dev_id.device_type == DEVICE_TYPE_CRT && + link->connector_signal != SIGNAL_TYPE_RGB) + continue; + if (link->device_tag.dev_id.device_type == DEVICE_TYPE_LCD && + link->connector_signal == SIGNAL_TYPE_RGB) + continue; + + DC_LOG_DC("BIOS object table - device_tag.acpi_device: %d", link->device_tag.acpi_device); + DC_LOG_DC("BIOS object table - device_tag.dev_id.device_type: %d", link->device_tag.dev_id.device_type); + DC_LOG_DC("BIOS object table - device_tag.dev_id.enum_id: %d", link->device_tag.dev_id.enum_id); + break; + } + + if (bios->integrated_info) + info = *bios->integrated_info; + + /* Look for channel mapping corresponding to connector and device tag */ + for (i = 0; i < MAX_NUMBER_OF_EXT_DISPLAY_PATH; i++) { + struct external_display_path *path = + &info.ext_disp_conn_info.path[i]; + + if (path->device_connector_id.enum_id == link->link_id.enum_id && + path->device_connector_id.id == link->link_id.id && + path->device_connector_id.type == link->link_id.type) { + if (link->device_tag.acpi_device != 0 && + path->device_acpi_enum == link->device_tag.acpi_device) { + link->ddi_channel_mapping = path->channel_mapping; + link->chip_caps = path->caps; + DC_LOG_DC("BIOS 
object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw); + DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps); + } else if (path->device_tag == + link->device_tag.dev_id.raw_device_tag) { + link->ddi_channel_mapping = path->channel_mapping; + link->chip_caps = path->caps; + DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw); + DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps); + } + + if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) { + link->bios_forced_drive_settings.VOLTAGE_SWING = + (info.ext_disp_conn_info.fixdpvoltageswing & 0x3); + link->bios_forced_drive_settings.PRE_EMPHASIS = + ((info.ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3); + } + + break; + } + } + + if (bios->funcs->get_atom_dc_golden_table) + bios->funcs->get_atom_dc_golden_table(bios); + + /* + * TODO check if GPIO programmed correctly + * + * If GPIO isn't programmed correctly HPD might not rise or drain + * fast enough, leading to bounces. + */ + program_hpd_filter(link); + + link->psr_settings.psr_vtotal_control_support = false; + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + + DC_LOG_DC("BIOS object table - %s finished successfully.\n", __func__); + return true; +device_tag_fail: + link->link_enc->funcs->destroy(&link->link_enc); +link_enc_create_fail: + if (link->panel_cntl != NULL) + link->panel_cntl->funcs->destroy(&link->panel_cntl); +panel_cntl_create_fail: + link_destroy_ddc_service(&link->ddc); +ddc_create_fail: +create_fail: + + if (link->hpd_gpio) { + dal_gpio_destroy_irq(&link->hpd_gpio); + link->hpd_gpio = NULL; + } + + DC_LOG_DC("BIOS object table - %s failed.\n", __func__); + return false; +} + +static bool construct_dpia(struct dc_link *link, + const struct link_init_data *init_params) +{ + struct ddc_service_init_data ddc_service_init_data = { 0 }; + struct dc_context *dc_ctx = init_params->ctx; + + DC_LOGGER_INIT(dc_ctx->logger); + + /* Initialized irq source for hpd and hpd rx */ + link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; + link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID; + link->link_status.dpcd_caps = &link->dpcd_caps; + + link->dc = init_params->dc; + link->ctx = dc_ctx; + link->link_index = init_params->link_index; + + memset(&link->preferred_training_settings, 0, + sizeof(struct dc_link_training_overrides)); + memset(&link->preferred_link_setting, 0, + sizeof(struct dc_link_settings)); + + /* Dummy Init for linkid */ + link->link_id.type = OBJECT_TYPE_CONNECTOR; + link->link_id.id = CONNECTOR_ID_DISPLAY_PORT; + link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index; + link->is_internal_display = false; + link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT; + LINK_INFO("Connector[%d] description:signal %d\n", + init_params->connector_index, + link->connector_signal); + + link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA; + link->is_dig_mapping_flexible = true; + + /* TODO: Initialize link : funcs->link_init */ + + ddc_service_init_data.ctx = link->ctx; + ddc_service_init_data.id = link->link_id; + ddc_service_init_data.link = link; + /* Set indicator for dpia link so that ddc wont be created */ + ddc_service_init_data.is_dpia_link = true; + + link->ddc = link_create_ddc_service(&ddc_service_init_data); + if (!link->ddc) { + DC_ERROR("Failed to create ddc_service!\n"); + goto ddc_create_fail; + } + + /* Set dpia port index : 0 to number of dpia ports */ + link->ddc_hw_inst = init_params->connector_index; + + // Assign Dpia preferred eng_id + if 
(link->dc->res_pool->funcs->get_preferred_eng_id_dpia) + link->dpia_preferred_eng_id = link->dc->res_pool->funcs->get_preferred_eng_id_dpia(link->ddc_hw_inst); + + /* TODO: Create link encoder */ + + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + + /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */ + link->wa_flags.dp_mot_reset_segment = true; + + return true; + +ddc_create_fail: + return false; +} + +static bool link_construct(struct dc_link *link, + const struct link_init_data *init_params) +{ + /* Handle dpia case */ + if (init_params->is_dpia_link == true) + return construct_dpia(link, init_params); + else + return construct_phy(link, init_params); +} + +struct dc_link *link_create(const struct link_init_data *init_params) +{ + struct dc_link *link = + kzalloc(sizeof(*link), GFP_KERNEL); + + if (NULL == link) + goto alloc_fail; + + if (false == link_construct(link, init_params)) + goto construct_fail; + + return link; + +construct_fail: + kfree(link); + +alloc_fail: + return NULL; +} + +void link_destroy(struct dc_link **link) +{ + link_destruct(*link); + kfree(*link); + *link = NULL; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.h b/drivers/gpu/drm/amd/display/dc/link/link_factory.h new file mode 100644 index 0000000000..e96220d48d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.h @@ -0,0 +1,31 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_FACTORY_H__ +#define __LINK_FACTORY_H__ +#include "link.h" +struct dc_link *link_create(const struct link_init_data *init_params); +void link_destroy(struct dc_link **link); + +#endif /* __LINK_FACTORY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c new file mode 100644 index 0000000000..4b5eccd994 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_frl.c @@ -0,0 +1,62 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "link_hwss_hpo_frl.h" +#include "core_types.h" +#include "virtual/virtual_link_hwss.h" + +static void setup_hpo_frl_stream_attribute(struct pipe_ctx *pipe_ctx) +{ + struct hpo_frl_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_frl_stream_enc; + struct dc_stream_state *stream = pipe_ctx->stream; + struct pipe_ctx *odm_pipe; + int odm_combine_num_segments = 1; + + /* get number of ODM combine input segments */ + for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) + odm_combine_num_segments++; + + stream_enc->funcs->hdmi_frl_set_stream_attribute( + stream_enc, + &stream->timing, + &stream->link->frl_link_settings.borrow_params, + odm_combine_num_segments); +} + +static const struct link_hwss hpo_frl_link_hwss = { + .setup_stream_encoder = virtual_setup_stream_encoder, + .reset_stream_encoder = virtual_reset_stream_encoder, + .setup_stream_attribute = setup_hpo_frl_stream_attribute, +}; + +bool can_use_hpo_frl_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link_res->hpo_frl_link_enc != NULL; +} + +const struct link_hwss *get_hpo_frl_link_hwss(void) +{ + return &hpo_frl_link_hwss; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.c b/drivers/gpu/drm/amd/display/dc/link/link_resource.c new file mode 100644 index 0000000000..bd42bb273c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.c @@ -0,0 +1,114 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
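The FRL handlers defined just above in link_hwss_hpo_frl.c are meant to be used as a pair: the capability check decides whether the HDMI FRL table applies to a given link/resource combination before the table is fetched. A hedged sketch of that selection pattern (the NULL return is a stand-in for whichever other link_hwss table a real caller would fall back to):

static const struct link_hwss *example_pick_frl_hwss(const struct dc_link *link,
		const struct link_resource *link_res)
{
	/* only valid when an HPO FRL link encoder is assigned to this resource */
	if (can_use_hpo_frl_link_hwss(link, link_res))
		return get_hpo_frl_link_hwss();

	return NULL; /* caller falls back to another link_hwss implementation */
}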
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +/* FILE POLICY AND INTENDED USAGE: + * This file implements accessors to link resource. + */ + +#include "link_resource.h" +#include "protocols/link_dp_capability.h" + +void link_get_cur_link_res(const struct dc_link *link, + struct link_resource *link_res) +{ + int i; + struct pipe_ctx *pipe = NULL; + + memset(link_res, 0, sizeof(*link_res)); + + for (i = 0; i < MAX_PIPES; i++) { + pipe = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) { + if (pipe->stream->link == link) { + *link_res = pipe->link_res; + break; + } + } + } + +} + +void link_get_cur_res_map(const struct dc *dc, uint32_t *map) +{ + struct dc_link *link; + uint32_t i; + uint32_t hpo_dp_recycle_map = 0; + + *map = 0; + + if (dc->caps.dp_hpo) { + for (i = 0; i < dc->caps.max_links; i++) { + link = dc->links[i]; + if (link->link_status.link_active && + link_dp_get_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING && + link_dp_get_encoding_format(&link->cur_link_settings) != DP_128b_132b_ENCODING) + /* hpo dp link encoder is considered as recycled, when RX reports 128b/132b encoding capability + * but current link doesn't use it. + */ + hpo_dp_recycle_map |= (1 << i); + } + *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT); + } +} + +void link_restore_res_map(const struct dc *dc, uint32_t *map) +{ + struct dc_link *link; + uint32_t i; + unsigned int available_hpo_dp_count; + uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK) + >> LINK_RES_HPO_DP_REC_MAP__SHIFT; + + if (dc->caps.dp_hpo) { + available_hpo_dp_count = dc->res_pool->hpo_dp_link_enc_count; + /* remove excess 128b/132b encoding support for not recycled links */ + for (i = 0; i < dc->caps.max_links; i++) { + if ((hpo_dp_recycle_map & (1 << i)) == 0) { + link = dc->links[i]; + if (link->type != dc_connection_none && + link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { + if (available_hpo_dp_count > 0) + available_hpo_dp_count--; + else + /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ + link->verified_link_cap.link_rate = LINK_RATE_HIGH3; + } + } + } + /* remove excess 128b/132b encoding support for recycled links */ + for (i = 0; i < dc->caps.max_links; i++) { + if ((hpo_dp_recycle_map & (1 << i)) != 0) { + link = dc->links[i]; + if (link->type != dc_connection_none && + link_dp_get_encoding_format(&link->verified_link_cap) == DP_128b_132b_ENCODING) { + if (available_hpo_dp_count > 0) + available_hpo_dp_count--; + else + /* remove 128b/132b encoding capability by limiting verified link rate to HBR3 */ + link->verified_link_cap.link_rate = LINK_RATE_HIGH3; + } + } + } + } +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_resource.h b/drivers/gpu/drm/amd/display/dc/link/link_resource.h new file mode 100644 index 0000000000..1907bda3cb --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_resource.h @@ -0,0 +1,32 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
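The recycle information that link_get_cur_res_map() above packs into the saved word can be unpacked with the same __MASK/__SHIFT pair used by link_restore_res_map(). A small sketch (not part of the patch) of querying a saved map for one link index:

static bool example_hpo_dp_link_was_recycled(uint32_t saved_map, uint32_t link_index)
{
	uint32_t recycle_bits = (saved_map & LINK_RES_HPO_DP_REC_MAP__MASK) >>
			LINK_RES_HPO_DP_REC_MAP__SHIFT;

	/* bit i is set when link i reported 128b/132b capability but is not currently using it */
	return (recycle_bits & (1 << link_index)) != 0;
}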
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#ifndef __LINK_RESOURCE_H__ +#define __LINK_RESOURCE_H__ +#include "link.h" +void link_get_cur_res_map(const struct dc *dc, uint32_t *map); +void link_restore_res_map(const struct dc *dc, uint32_t *map); +void link_get_cur_link_res(const struct dc_link *link, + struct link_resource *link_res); +#endif /* __LINK_RESOURCE_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c new file mode 100644 index 0000000000..b45fda96ea --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -0,0 +1,368 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file owns timing validation against various link limitations. (ex. + * link bandwidth, receiver capability or our hardware capability) It also + * provides helper functions exposing bandwidth formulas used in validation. 
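To make the bandwidth helpers in this file concrete, here is the arithmetic of dp_link_bandwidth_kbps() evaluated by hand as a standalone sketch. The raw per-lane rates and the x10000 efficiency factors are written out literally for the example and are assumed to match the driver's constants (80% for 8b/10b without FEC and 96.71% for 128b/132b, as stated in the comments further down):

#include <stdio.h>

int main(void)
{
	/* 8b/10b, HBR2 x4, no FEC: 5.4 Gbps raw per lane, 80% data efficiency */
	unsigned long long hbr2_x4 = 5400000ULL * 4 / 10000 * 8000;

	/* 128b/132b, UHBR10 x4: 10 Gbps raw per lane, 96.71% data efficiency */
	unsigned long long uhbr10_x4 = 10000000ULL * 4 / 10000 * 9671;

	printf("HBR2 x4:   %llu kbps\n", hbr2_x4);	/* 17280000 kbps = 17.28 Gbps */
	printf("UHBR10 x4: %llu kbps\n", uhbr10_x4);	/* 38684000 kbps = 38.68 Gbps */
	return 0;
}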
+ */ +#include "link_validation.h" +#include "protocols/link_dp_capability.h" +#include "protocols/link_dp_dpia_bw.h" +#include "resource.h" + +#define DC_LOGGER_INIT(logger) + +static uint32_t get_tmds_output_pixel_clock_100hz(const struct dc_crtc_timing *timing) +{ + + uint32_t pxl_clk = timing->pix_clk_100hz; + + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + pxl_clk /= 2; + else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + pxl_clk = pxl_clk * 2 / 3; + + if (timing->display_color_depth == COLOR_DEPTH_101010) + pxl_clk = pxl_clk * 10 / 8; + else if (timing->display_color_depth == COLOR_DEPTH_121212) + pxl_clk = pxl_clk * 12 / 8; + + return pxl_clk; +} + +static bool dp_active_dongle_validate_timing( + const struct dc_crtc_timing *timing, + const struct dpcd_caps *dpcd_caps) +{ + const struct dc_dongle_caps *dongle_caps = &dpcd_caps->dongle_caps; + + switch (dpcd_caps->dongle_type) { + case DISPLAY_DONGLE_DP_VGA_CONVERTER: + case DISPLAY_DONGLE_DP_DVI_CONVERTER: + case DISPLAY_DONGLE_DP_DVI_DONGLE: + if (timing->pixel_encoding == PIXEL_ENCODING_RGB) + return true; + else + return false; + default: + break; + } + + if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER && + dongle_caps->extendedCapValid == true) { + /* Check Pixel Encoding */ + switch (timing->pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + break; + case PIXEL_ENCODING_YCBCR422: + if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) + return false; + break; + case PIXEL_ENCODING_YCBCR420: + if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) + return false; + break; + default: + /* Invalid Pixel Encoding*/ + return false; + } + + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + case COLOR_DEPTH_888: + /*888 and 666 should always be supported*/ + break; + case COLOR_DEPTH_101010: + if (dongle_caps->dp_hdmi_max_bpc < 10) + return false; + break; + case COLOR_DEPTH_121212: + if (dongle_caps->dp_hdmi_max_bpc < 12) + return false; + break; + case COLOR_DEPTH_141414: + case COLOR_DEPTH_161616: + default: + /* These color depths are currently not supported */ + return false; + } + + /* Check 3D format */ + switch (timing->timing_3d_format) { + case TIMING_3D_FORMAT_NONE: + case TIMING_3D_FORMAT_FRAME_ALTERNATE: + /*Only frame alternate 3D is supported on active dongle*/ + break; + default: + /*other 3D formats are not supported due to bad infoframe translation */ + return false; + } + + if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter + struct dc_crtc_timing outputTiming = *timing; + +#if defined(CONFIG_DRM_AMD_DC_FP) + if (timing->flags.DSC && !timing->dsc_cfg.is_frl) + /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ + outputTiming.flags.DSC = 0; +#endif + if (dc_bandwidth_in_kbps_from_timing(&outputTiming, DC_LINK_ENCODING_HDMI_FRL) > + dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) + return false; + } else { // DP to HDMI TMDS converter + if (get_tmds_output_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) + return false; + } + } + + if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 && + dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 && + dongle_caps->dfp_cap_ext.supported) { + + if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000)) + return false; + + if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable) + return false; + + if 
(dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable) + return false; + + if (timing->pixel_encoding == PIXEL_ENCODING_RGB) { + if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) + return false; + if (timing->display_color_depth == COLOR_DEPTH_666 && + !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_888 && + !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_101010 && + !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_121212 && + !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_161616 && + !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc) + return false; + } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) { + if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) + return false; + if (timing->display_color_depth == COLOR_DEPTH_888 && + !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_101010 && + !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_121212 && + !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_161616 && + !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc) + return false; + } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) { + if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) + return false; + if (timing->display_color_depth == COLOR_DEPTH_888 && + !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_101010 && + !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_121212 && + !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_161616 && + !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc) + return false; + } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) { + if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb) + return false; + if (timing->display_color_depth == COLOR_DEPTH_888 && + !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_101010 && + !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_121212 && + !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc) + return false; + else if (timing->display_color_depth == COLOR_DEPTH_161616 && + !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc) + return false; + } + } + + return true; +} + +uint32_t dp_link_bandwidth_kbps( + const struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + uint32_t total_data_bw_efficiency_x10000 = 0; + uint32_t link_rate_per_lane_kbps = 0; + + switch (link_dp_get_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + /* For 8b/10b encoding: + * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ 
per DP byte per lane. + * data bandwidth efficiency is 80% with additional 3% overhead if FEC is supported. + */ + link_rate_per_lane_kbps = link_settings->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE; + total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000; + if (dp_should_enable_fec(link)) { + total_data_bw_efficiency_x10000 /= 100; + total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100; + } + break; + case DP_128b_132b_ENCODING: + /* For 128b/132b encoding: + * link rate is defined in the unit of 10mbps per lane. + * total data bandwidth efficiency is always 96.71%. + */ + link_rate_per_lane_kbps = link_settings->link_rate * 10000; + total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000; + break; + default: + break; + } + + /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */ + return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000; +} + +static bool dp_validate_mode_timing( + struct dc_link *link, + const struct dc_crtc_timing *timing) +{ + uint32_t req_bw; + uint32_t max_bw; + + const struct dc_link_settings *link_setting; + + /* According to spec, VSC SDP should be used if pixel format is YCbCr420 */ + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && + !link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED && + dal_graphics_object_id_get_connector_id(link->link_id) != CONNECTOR_ID_VIRTUAL) + return false; + + /*always DP fail safe mode*/ + if ((timing->pix_clk_100hz / 10) == (uint32_t) 25175 && + timing->h_addressable == (uint32_t) 640 && + timing->v_addressable == (uint32_t) 480) + return true; + + link_setting = dp_get_verified_link_cap(link); + + /* TODO: DYNAMIC_VALIDATION needs to be implemented */ + /*if (flags.DYNAMIC_VALIDATION == 1 && + link->verified_link_cap.lane_count != LANE_COUNT_UNKNOWN) + link_setting = &link->verified_link_cap; + */ + + req_bw = dc_bandwidth_in_kbps_from_timing(timing, dc_link_get_highest_encoding_format(link)); + max_bw = dp_link_bandwidth_kbps(link, link_setting); + + if (req_bw <= max_bw) { + /* remember the biggest mode here, during + * initial link training (to get + * verified_link_cap), LS sends event about + * cannot train at reported cap to upper + * layer and upper layer will re-enumerate modes. 
+ * this is not necessary if the lower + * verified_link_cap is enough to drive + * all the modes */ + + /* TODO: DYNAMIC_VALIDATION needs to be implemented */ + /* if (flags.DYNAMIC_VALIDATION == 1) + dpsst->max_req_bw_for_verified_linkcap = dal_max( + dpsst->max_req_bw_for_verified_linkcap, req_bw); */ + return true; + } else + return false; +} + +enum dc_status link_validate_mode_timing( + const struct dc_stream_state *stream, + struct dc_link *link, + const struct dc_crtc_timing *timing) +{ + uint32_t max_pix_clk = stream->link->dongle_max_pix_clk * 10; + struct dpcd_caps *dpcd_caps = &link->dpcd_caps; + + /* A hack to avoid failing any modes for EDID override feature on + * topology change such as lower quality cable for DP or different dongle + */ + if (link->remote_sinks[0] && link->remote_sinks[0]->sink_signal == SIGNAL_TYPE_VIRTUAL) + return DC_OK; + + /* Passive Dongle */ + if (max_pix_clk != 0 && get_tmds_output_pixel_clock_100hz(timing) > max_pix_clk) + return DC_EXCEED_DONGLE_CAP; + + /* Active Dongle*/ + if (!dp_active_dongle_validate_timing(timing, dpcd_caps)) + return DC_EXCEED_DONGLE_CAP; + + switch (stream->signal) { + case SIGNAL_TYPE_EDP: + case SIGNAL_TYPE_DISPLAY_PORT: + if (!dp_validate_mode_timing( + link, + timing)) + return DC_NO_DP_LINK_BANDWIDTH; + break; + + default: + break; + } + + return DC_OK; +} + +bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams) +{ + bool ret = true; + int bw_needed[MAX_DPIA_NUM]; + struct dc_link *link[MAX_DPIA_NUM]; + + if (!num_streams || num_streams > MAX_DPIA_NUM) + return ret; + + for (uint8_t i = 0; i < num_streams; ++i) { + + link[i] = stream[i].link; + bw_needed[i] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, + dc_link_get_highest_encoding_format(link[i])); + } + + ret = dpia_validate_usb4_bw(link, bw_needed, num_streams); + + return ret; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h new file mode 100644 index 0000000000..4a954317d0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h @@ -0,0 +1,39 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#ifndef __LINK_VALIDATION_H__ +#define __LINK_VALIDATION_H__ +#include "link.h" +enum dc_status link_validate_mode_timing( + const struct dc_stream_state *stream, + struct dc_link *link, + const struct dc_crtc_timing *timing); +bool link_validate_dpia_bandwidth( + const struct dc_stream_state *stream, + const unsigned int num_streams); +uint32_t dp_link_bandwidth_kbps( + const struct dc_link *link, + const struct dc_link_settings *link_settings); + +#endif /* __LINK_VALIDATION_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c new file mode 100644 index 0000000000..ecfd83299e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c @@ -0,0 +1,605 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements generic display communication protocols such as i2c, aux + * and scdc. The file should not contain any specific applications of these + * protocols such as display capability query, detection, or handshaking such as + * link training. + */ +#include "link_ddc.h" +#include "vector.h" +#include "dce/dce_aux.h" +#include "dal_asic_id.h" +#include "link_dpcd.h" +#include "dm_helpers.h" +#include "atomfirmware.h" + +#define DC_LOGGER_INIT(logger) + +static const uint8_t DP_VGA_DONGLE_BRANCH_DEV_NAME[] = "DpVga"; +/* DP to Dual link DVI converter */ +static const uint8_t DP_DVI_CONVERTER_ID_4[] = "m2DVIa"; +static const uint8_t DP_DVI_CONVERTER_ID_5[] = "3393N2"; + +struct i2c_payloads { + struct vector payloads; +}; + +struct aux_payloads { + struct vector payloads; +}; + +static bool i2c_payloads_create( + struct dc_context *ctx, + struct i2c_payloads *payloads, + uint32_t count) +{ + if (dal_vector_construct( + &payloads->payloads, ctx, count, sizeof(struct i2c_payload))) + return true; + + return false; +} + +static struct i2c_payload *i2c_payloads_get(struct i2c_payloads *p) +{ + return (struct i2c_payload *)p->payloads.container; +} + +static uint32_t i2c_payloads_get_count(struct i2c_payloads *p) +{ + return p->payloads.count; +} + +static void i2c_payloads_destroy(struct i2c_payloads *p) +{ + if (!p) + return; + + dal_vector_destruct(&p->payloads); +} + +#define DDC_MIN(a, b) (((a) < (b)) ? 
(a) : (b)) + +static void i2c_payloads_add( + struct i2c_payloads *payloads, + uint32_t address, + uint32_t len, + uint8_t *data, + bool write) +{ + uint32_t payload_size = EDID_SEGMENT_SIZE; + uint32_t pos; + + for (pos = 0; pos < len; pos += payload_size) { + struct i2c_payload payload = { + .write = write, + .address = address, + .length = DDC_MIN(payload_size, len - pos), + .data = data + pos }; + dal_vector_append(&payloads->payloads, &payload); + } + +} + +static void ddc_service_construct( + struct ddc_service *ddc_service, + struct ddc_service_init_data *init_data) +{ + enum connector_id connector_id = + dal_graphics_object_id_get_connector_id(init_data->id); + + struct gpio_service *gpio_service = init_data->ctx->gpio_service; + struct graphics_object_i2c_info i2c_info; + struct gpio_ddc_hw_info hw_info; + struct dc_bios *dcb = init_data->ctx->dc_bios; + + ddc_service->link = init_data->link; + ddc_service->ctx = init_data->ctx; + + if (init_data->is_dpia_link || + dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info) != BP_RESULT_OK) { + ddc_service->ddc_pin = NULL; + } else { + DC_LOGGER_INIT(ddc_service->ctx->logger); + DC_LOG_DC("BIOS object table - i2c_line: %d", i2c_info.i2c_line); + DC_LOG_DC("BIOS object table - i2c_engine_id: %d", i2c_info.i2c_engine_id); + + hw_info.ddc_channel = i2c_info.i2c_line; + if (ddc_service->link != NULL) + hw_info.hw_supported = i2c_info.i2c_hw_assist; + else + hw_info.hw_supported = false; + + ddc_service->ddc_pin = dal_gpio_create_ddc( + gpio_service, + i2c_info.gpio_info.clk_a_register_index, + 1 << i2c_info.gpio_info.clk_a_shift, + &hw_info); + } + + ddc_service->flags.EDID_QUERY_DONE_ONCE = false; + ddc_service->flags.FORCE_READ_REPEATED_START = false; + ddc_service->flags.EDID_STRESS_READ = false; + + ddc_service->flags.IS_INTERNAL_DISPLAY = + connector_id == CONNECTOR_ID_EDP || + connector_id == CONNECTOR_ID_LVDS; + + ddc_service->wa.raw = 0; +} + +struct ddc_service *link_create_ddc_service( + struct ddc_service_init_data *init_data) +{ + struct ddc_service *ddc_service; + + ddc_service = kzalloc(sizeof(struct ddc_service), GFP_KERNEL); + + if (!ddc_service) + return NULL; + + ddc_service_construct(ddc_service, init_data); + return ddc_service; +} + +static void ddc_service_destruct(struct ddc_service *ddc) +{ + if (ddc->ddc_pin) + dal_gpio_destroy_ddc(&ddc->ddc_pin); +} + +void link_destroy_ddc_service(struct ddc_service **ddc) +{ + if (!ddc || !*ddc) { + BREAK_TO_DEBUGGER(); + return; + } + ddc_service_destruct(*ddc); + kfree(*ddc); + *ddc = NULL; +} + +void set_ddc_transaction_type( + struct ddc_service *ddc, + enum ddc_transaction_type type) +{ + ddc->transaction_type = type; +} + +bool link_is_in_aux_transaction_mode(struct ddc_service *ddc) +{ + switch (ddc->transaction_type) { + case DDC_TRANSACTION_TYPE_I2C_OVER_AUX: + case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER: + case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_RETRY_DEFER: + return true; + default: + break; + } + return false; +} + +void set_dongle_type(struct ddc_service *ddc, + enum display_dongle_type dongle_type) +{ + ddc->dongle_type = dongle_type; +} + +static uint32_t defer_delay_converter_wa( + struct ddc_service *ddc, + uint32_t defer_delay) +{ + struct dc_link *link = ddc->link; + + if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 && + (link->dpcd_caps.branch_fw_revision[0] < 0x01 || + (link->dpcd_caps.branch_fw_revision[0] == 0x01 && + link->dpcd_caps.branch_fw_revision[1] < 0x40)) && 
+ !memcmp(link->dpcd_caps.branch_dev_name, + DP_VGA_DONGLE_BRANCH_DEV_NAME, + sizeof(link->dpcd_caps.branch_dev_name))) + + return defer_delay > DPVGA_DONGLE_AUX_DEFER_WA_DELAY ? + defer_delay : DPVGA_DONGLE_AUX_DEFER_WA_DELAY; + + if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_0080E1 && + !memcmp(link->dpcd_caps.branch_dev_name, + DP_DVI_CONVERTER_ID_4, + sizeof(link->dpcd_caps.branch_dev_name))) + return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY ? + defer_delay : I2C_OVER_AUX_DEFER_WA_DELAY; + if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_006037 && + !memcmp(link->dpcd_caps.branch_dev_name, + DP_DVI_CONVERTER_ID_5, + sizeof(link->dpcd_caps.branch_dev_name))) + return defer_delay > I2C_OVER_AUX_DEFER_WA_DELAY_1MS ? + I2C_OVER_AUX_DEFER_WA_DELAY_1MS : defer_delay; + + return defer_delay; +} + +#define DP_TRANSLATOR_DELAY 5 + +uint32_t link_get_aux_defer_delay(struct ddc_service *ddc) +{ + uint32_t defer_delay = 0; + + switch (ddc->transaction_type) { + case DDC_TRANSACTION_TYPE_I2C_OVER_AUX: + if ((DISPLAY_DONGLE_DP_VGA_CONVERTER == ddc->dongle_type) || + (DISPLAY_DONGLE_DP_DVI_CONVERTER == ddc->dongle_type) || + (DISPLAY_DONGLE_DP_HDMI_CONVERTER == + ddc->dongle_type)) { + + defer_delay = DP_TRANSLATOR_DELAY; + + defer_delay = + defer_delay_converter_wa(ddc, defer_delay); + + } else /*sink has a delay different from an Active Converter*/ + defer_delay = 0; + break; + case DDC_TRANSACTION_TYPE_I2C_OVER_AUX_WITH_DEFER: + defer_delay = DP_TRANSLATOR_DELAY; + break; + default: + break; + } + return defer_delay; +} + +static bool submit_aux_command(struct ddc_service *ddc, + struct aux_payload *payload) +{ + uint32_t retrieved = 0; + bool ret = false; + + if (!ddc) + return false; + + if (!payload) + return false; + + do { + struct aux_payload current_payload; + bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >= + payload->length; + uint32_t payload_length = is_end_of_payload ? + payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; + + current_payload.address = payload->address; + current_payload.data = &payload->data[retrieved]; + current_payload.defer_delay = payload->defer_delay; + current_payload.i2c_over_aux = payload->i2c_over_aux; + current_payload.length = payload_length; + /* set mot (middle of transaction) to false if it is the last payload */ + current_payload.mot = is_end_of_payload ? payload->mot:true; + current_payload.write_status_update = false; + current_payload.reply = payload->reply; + current_payload.write = payload->write; + + ret = link_aux_transfer_with_retries_no_mutex(ddc, ¤t_payload); + + retrieved += payload_length; + } while (retrieved < payload->length && ret == true); + + return ret; +} + +bool link_query_ddc_data( + struct ddc_service *ddc, + uint32_t address, + uint8_t *write_buf, + uint32_t write_size, + uint8_t *read_buf, + uint32_t read_size) +{ + bool success = true; + uint32_t payload_size = + link_is_in_aux_transaction_mode(ddc) ? 
+ DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE; + + uint32_t write_payloads = + (write_size + payload_size - 1) / payload_size; + + uint32_t read_payloads = + (read_size + payload_size - 1) / payload_size; + + uint32_t payloads_num = write_payloads + read_payloads; + + if (!payloads_num) + return false; + + if (link_is_in_aux_transaction_mode(ddc)) { + struct aux_payload payload; + + payload.i2c_over_aux = true; + payload.address = address; + payload.reply = NULL; + payload.defer_delay = link_get_aux_defer_delay(ddc); + payload.write_status_update = false; + + if (write_size != 0) { + payload.write = true; + /* should not set mot (middle of transaction) to 0 + * if there are pending read payloads + */ + payload.mot = !(read_size == 0); + payload.length = write_size; + payload.data = write_buf; + + success = submit_aux_command(ddc, &payload); + } + + if (read_size != 0 && success) { + payload.write = false; + /* should set mot (middle of transaction) to 0 + * since it is the last payload to send + */ + payload.mot = false; + payload.length = read_size; + payload.data = read_buf; + + success = submit_aux_command(ddc, &payload); + } + } else { + struct i2c_command command = {0}; + struct i2c_payloads payloads; + + if (!i2c_payloads_create(ddc->ctx, &payloads, payloads_num)) + return false; + + command.payloads = i2c_payloads_get(&payloads); + command.number_of_payloads = 0; + command.engine = DDC_I2C_COMMAND_ENGINE; + command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz; + + i2c_payloads_add( + &payloads, address, write_size, write_buf, true); + + i2c_payloads_add( + &payloads, address, read_size, read_buf, false); + + command.number_of_payloads = + i2c_payloads_get_count(&payloads); + + success = dm_helpers_submit_i2c( + ddc->ctx, + ddc->link, + &command); + + i2c_payloads_destroy(&payloads); + } + + return success; +} + +int link_aux_transfer_raw(struct ddc_service *ddc, + struct aux_payload *payload, + enum aux_return_code_type *operation_result) +{ + if (ddc->ctx->dc->debug.enable_dmub_aux_for_legacy_ddc || + !ddc->ddc_pin) { + return dce_aux_transfer_dmub_raw(ddc, payload, operation_result); + } else { + return dce_aux_transfer_raw(ddc, payload, operation_result); + } +} + +uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link) +{ + uint32_t vendor_lttpr_write_address = 0xF004F; + uint8_t offset; + + switch (link->dpcd_caps.lttpr_caps.phy_repeater_cnt) { + case 0x80: // 1 lttpr repeater + offset = 1; + break; + case 0x40: // 2 lttpr repeaters + offset = 2; + break; + case 0x20: // 3 lttpr repeaters + offset = 3; + break; + case 0x10: // 4 lttpr repeaters + offset = 4; + break; + case 0x08: // 5 lttpr repeaters + offset = 5; + break; + case 0x04: // 6 lttpr repeaters + offset = 6; + break; + case 0x02: // 7 lttpr repeaters + offset = 7; + break; + case 0x01: // 8 lttpr repeaters + offset = 8; + break; + default: + offset = 0xFF; + } + + if (offset != 0xFF) { + vendor_lttpr_write_address += + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + } + return vendor_lttpr_write_address; +} + +uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link) +{ + return link_get_fixed_vs_pe_retimer_write_address(link) + 4; +} + +bool link_configure_fixed_vs_pe_retimer(struct ddc_service *ddc, const uint8_t *data, uint32_t length) +{ + struct aux_payload write_payload = { + .i2c_over_aux = false, + .write = true, + .address = link_get_fixed_vs_pe_retimer_write_address(ddc->link), + .length = length, + .data = (uint8_t *) data, + .reply = NULL, + .mot = 
I2C_MOT_UNDEF, + .write_status_update = false, + .defer_delay = 0, + }; + + return link_aux_transfer_with_retries_no_mutex(ddc, + &write_payload); +} + +bool link_query_fixed_vs_pe_retimer(struct ddc_service *ddc, uint8_t *data, uint32_t length) +{ + struct aux_payload read_payload = { + .i2c_over_aux = false, + .write = false, + .address = link_get_fixed_vs_pe_retimer_read_address(ddc->link), + .length = length, + .data = data, + .reply = NULL, + .mot = I2C_MOT_UNDEF, + .write_status_update = false, + .defer_delay = 0, + }; + + return link_aux_transfer_with_retries_no_mutex(ddc, + &read_payload); +} + +bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, + struct aux_payload *payload) +{ + return dce_aux_transfer_with_retries(ddc, payload); +} + + +bool try_to_configure_aux_timeout(struct ddc_service *ddc, + uint32_t timeout) +{ + bool result = false; + struct ddc *ddc_pin = ddc->ddc_pin; + + if ((ddc->link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + !ddc->link->dc->debug.disable_fixed_vs_aux_timeout_wa && + ddc->ctx->dce_version == DCN_VERSION_3_1) { + /* Fixed VS workaround for AUX timeout */ + const uint32_t fixed_vs_address = 0xF004F; + const uint8_t fixed_vs_data[4] = {0x1, 0x22, 0x63, 0xc}; + + core_link_write_dpcd(ddc->link, + fixed_vs_address, + fixed_vs_data, + sizeof(fixed_vs_data)); + + timeout = 3072; + } + + /* Do not try to access nonexistent DDC pin. */ + if (ddc->link->ep_type != DISPLAY_ENDPOINT_PHY) + return true; + + if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout) { + ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout); + result = true; + } + + return result; +} + +struct ddc *get_ddc_pin(struct ddc_service *ddc_service) +{ + return ddc_service->ddc_pin; +} + +void write_scdc_data(struct ddc_service *ddc_service, + uint32_t pix_clk, + bool lte_340_scramble) +{ + bool over_340_mhz = pix_clk > 340000 ? 
1 : 0; + uint8_t slave_address = HDMI_SCDC_ADDRESS; + uint8_t offset = HDMI_SCDC_SINK_VERSION; + uint8_t sink_version = 0; + uint8_t write_buffer[2] = {0}; + /*Lower than 340 Scramble bit from SCDC caps*/ + + if (ddc_service->link->local_sink && + ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) + return; + + link_query_ddc_data(ddc_service, slave_address, &offset, + sizeof(offset), &sink_version, sizeof(sink_version)); + if (sink_version == 1) { + /*Source Version = 1*/ + write_buffer[0] = HDMI_SCDC_SOURCE_VERSION; + write_buffer[1] = 1; + link_query_ddc_data(ddc_service, slave_address, + write_buffer, sizeof(write_buffer), NULL, 0); + /*Read Request from SCDC caps*/ + } + write_buffer[0] = HDMI_SCDC_TMDS_CONFIG; + + if (over_340_mhz) { + write_buffer[1] = 3; + } else if (lte_340_scramble) { + write_buffer[1] = 1; + } else { + write_buffer[1] = 0; + } + link_query_ddc_data(ddc_service, slave_address, write_buffer, + sizeof(write_buffer), NULL, 0); +} + +void read_scdc_data(struct ddc_service *ddc_service) +{ + uint8_t slave_address = HDMI_SCDC_ADDRESS; + uint8_t offset = HDMI_SCDC_TMDS_CONFIG; + uint8_t tmds_config = 0; + + if (ddc_service->link->local_sink && + ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite) + return; + + link_query_ddc_data(ddc_service, slave_address, &offset, + sizeof(offset), &tmds_config, sizeof(tmds_config)); + if (tmds_config & 0x1) { + union hdmi_scdc_status_flags_data status_data = {0}; + uint8_t scramble_status = 0; + + offset = HDMI_SCDC_SCRAMBLER_STATUS; + link_query_ddc_data(ddc_service, slave_address, + &offset, sizeof(offset), &scramble_status, + sizeof(scramble_status)); + offset = HDMI_SCDC_STATUS_FLAGS; + link_query_ddc_data(ddc_service, slave_address, + &offset, sizeof(offset), &status_data.byte, + sizeof(status_data.byte)); + } +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h new file mode 100644 index 0000000000..a3e25e55be --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.h @@ -0,0 +1,106 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DAL_DDC_SERVICE_H__ +#define __DAL_DDC_SERVICE_H__ + +#include "link.h" + +#define AUX_POWER_UP_WA_DELAY 500 +#define I2C_OVER_AUX_DEFER_WA_DELAY 70 +#define DPVGA_DONGLE_AUX_DEFER_WA_DELAY 40 +#define I2C_OVER_AUX_DEFER_WA_DELAY_1MS 1 +#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/ +#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/ + +#define EDID_SEGMENT_SIZE 256 + +struct ddc_service *link_create_ddc_service( + struct ddc_service_init_data *ddc_init_data); + +void link_destroy_ddc_service(struct ddc_service **ddc); + +void set_ddc_transaction_type( + struct ddc_service *ddc, + enum ddc_transaction_type type); + +uint32_t link_get_aux_defer_delay(struct ddc_service *ddc); + +bool link_is_in_aux_transaction_mode(struct ddc_service *ddc); + +bool try_to_configure_aux_timeout(struct ddc_service *ddc, + uint32_t timeout); + +bool link_query_ddc_data( + struct ddc_service *ddc, + uint32_t address, + uint8_t *write_buf, + uint32_t write_size, + uint8_t *read_buf, + uint32_t read_size); + +/* Attempt to submit an aux payload, retrying on timeouts, defers, and busy + * states as outlined in the DP spec. Returns true if the request was + * successful. + * + * NOTE: The function requires explicit mutex on DM side in order to prevent + * potential race condition. DC components should call the dpcd read/write + * function in dm_helpers in order to access dpcd safely + */ +bool link_aux_transfer_with_retries_no_mutex(struct ddc_service *ddc, + struct aux_payload *payload); + +bool link_configure_fixed_vs_pe_retimer( + struct ddc_service *ddc, + const uint8_t *data, + uint32_t length); + +bool link_query_fixed_vs_pe_retimer( + struct ddc_service *ddc, + uint8_t *data, + uint32_t length); + +uint32_t link_get_fixed_vs_pe_retimer_read_address(struct dc_link *link); +uint32_t link_get_fixed_vs_pe_retimer_write_address(struct dc_link *link); + + +void write_scdc_data( + struct ddc_service *ddc_service, + uint32_t pix_clk, + bool lte_340_scramble); + +void read_scdc_data( + struct ddc_service *ddc_service); + +void set_dongle_type(struct ddc_service *ddc, + enum display_dongle_type dongle_type); + +struct ddc *get_ddc_pin(struct ddc_service *ddc_service); + +int link_aux_transfer_raw(struct ddc_service *ddc, + struct aux_payload *payload, + enum aux_return_code_type *operation_result); +#endif /* __DAL_DDC_SERVICE_H__ */ + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c new file mode 100644 index 0000000000..db87aa7b5c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -0,0 +1,2293 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp specific link capability retrieval sequence. It is + * responsible for retrieving, parsing, overriding, deciding capability obtained + * from dp link. Link capability consists of encoders, DPRXs, cables, retimers, + * usb and all other possible backend capabilities. Other components should + * include this header file in order to access link capability. Accessing link + * capability by dereferencing dc_link outside dp_link_capability is not a + * recommended method as it makes the component dependent on the underlying data + * structure used to represent link capability instead of function interfaces. + */ + +#include "link_dp_capability.h" +#include "link_ddc.h" +#include "link_dpcd.h" +#include "link_dp_dpia.h" +#include "link_dp_phy.h" +#include "link_edp_panel_control.h" +#include "link_dp_irq_handler.h" +#include "link/accessories/link_dp_trace.h" +#include "link/link_detection.h" +#include "link/link_validation.h" +#include "link_dp_training.h" +#include "atomfirmware.h" +#include "resource.h" +#include "link_enc_cfg.h" +#include "dc_dmub_srv.h" +#include "gpio_service_interface.h" + +#define DC_LOGGER \ + link->ctx->logger +#define DC_TRACE_LEVEL_MESSAGE(...) /* do nothing */ + +#ifndef MAX +#define MAX(X, Y) ((X) > (Y) ? (X) : (Y)) +#endif +#ifndef MIN +#define MIN(X, Y) ((X) < (Y) ? (X) : (Y)) +#endif + +struct dp_lt_fallback_entry { + enum dc_lane_count lane_count; + enum dc_link_rate link_rate; +}; + +static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { + /* This link training fallback array is ordered by + * link bandwidth from highest to lowest. + * DP specs makes it a normative policy to always + * choose the next highest link bandwidth during + * link training fallback. 
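 * For example, 2 x UHBR13.5 (~26.1 Gbps of effective data bandwidth
 * at ~96.7% 128b/132b efficiency) is listed ahead of 4 x HBR3
 * (~25.9 Gbps effective at 80% 8b/10b efficiency), i.e. the entries
 * below are ordered by effective data bandwidth rather than by raw
 * per-lane symbol rate.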
+ */ + {LANE_COUNT_FOUR, LINK_RATE_UHBR20}, + {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5}, + {LANE_COUNT_TWO, LINK_RATE_UHBR20}, + {LANE_COUNT_FOUR, LINK_RATE_UHBR10}, + {LANE_COUNT_TWO, LINK_RATE_UHBR13_5}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH3}, + {LANE_COUNT_ONE, LINK_RATE_UHBR20}, + {LANE_COUNT_TWO, LINK_RATE_UHBR10}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH2}, + {LANE_COUNT_ONE, LINK_RATE_UHBR13_5}, + {LANE_COUNT_TWO, LINK_RATE_HIGH3}, + {LANE_COUNT_ONE, LINK_RATE_UHBR10}, + {LANE_COUNT_TWO, LINK_RATE_HIGH2}, + {LANE_COUNT_FOUR, LINK_RATE_HIGH}, + {LANE_COUNT_ONE, LINK_RATE_HIGH3}, + {LANE_COUNT_FOUR, LINK_RATE_LOW}, + {LANE_COUNT_ONE, LINK_RATE_HIGH2}, + {LANE_COUNT_TWO, LINK_RATE_HIGH}, + {LANE_COUNT_TWO, LINK_RATE_LOW}, + {LANE_COUNT_ONE, LINK_RATE_HIGH}, + {LANE_COUNT_ONE, LINK_RATE_LOW}, +}; + +static const struct dc_link_settings fail_safe_link_settings = { + .lane_count = LANE_COUNT_ONE, + .link_rate = LINK_RATE_LOW, + .link_spread = LINK_SPREAD_DISABLED, +}; + +bool is_dp_active_dongle(const struct dc_link *link) +{ + return (link->dpcd_caps.dongle_type >= DISPLAY_DONGLE_DP_VGA_CONVERTER) && + (link->dpcd_caps.dongle_type <= DISPLAY_DONGLE_DP_HDMI_CONVERTER); +} + +bool is_dp_branch_device(const struct dc_link *link) +{ + return link->dpcd_caps.is_branch_dev; +} + +static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc) +{ + switch (bpc) { + case DOWN_STREAM_MAX_8BPC: + return 8; + case DOWN_STREAM_MAX_10BPC: + return 10; + case DOWN_STREAM_MAX_12BPC: + return 12; + case DOWN_STREAM_MAX_16BPC: + return 16; + default: + break; + } + + return -1; +} + +uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count) +{ + switch (lttpr_repeater_count) { + case 0x80: // 1 lttpr repeater + return 1; + case 0x40: // 2 lttpr repeaters + return 2; + case 0x20: // 3 lttpr repeaters + return 3; + case 0x10: // 4 lttpr repeaters + return 4; + case 0x08: // 5 lttpr repeaters + return 5; + case 0x04: // 6 lttpr repeaters + return 6; + case 0x02: // 7 lttpr repeaters + return 7; + case 0x01: // 8 lttpr repeaters + return 8; + default: + break; + } + return 0; // invalid value +} + +uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) +{ + switch (bw) { + case 0b001: + return 9000000; + case 0b010: + return 18000000; + case 0b011: + return 24000000; + case 0b100: + return 32000000; + case 0b101: + return 40000000; + case 0b110: + return 48000000; + } + + return 0; +} + +static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz) +{ + enum dc_link_rate link_rate; + // LinkRate is normally stored as a multiplier of 0.27 Gbps per lane. Do the translation. 
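	// For illustration, assuming the dc_link_rate enum values equal the
	// DPCD link-rate codes (e.g. LINK_RATE_HIGH == 0x0A): 0x0A * 0.27 Gbps
	// = 2.7 Gbps per lane, so an input of 2700000 kHz maps back to
	// LINK_RATE_HIGH below, and 8100000 kHz (0x1E * 0.27 Gbps) maps to
	// LINK_RATE_HIGH3.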
+ switch (link_rate_in_khz) { + case 1620000: + link_rate = LINK_RATE_LOW; // Rate_1 (RBR) - 1.62 Gbps/Lane + break; + case 2160000: + link_rate = LINK_RATE_RATE_2; // Rate_2 - 2.16 Gbps/Lane + break; + case 2430000: + link_rate = LINK_RATE_RATE_3; // Rate_3 - 2.43 Gbps/Lane + break; + case 2700000: + link_rate = LINK_RATE_HIGH; // Rate_4 (HBR) - 2.70 Gbps/Lane + break; + case 3240000: + link_rate = LINK_RATE_RBR2; // Rate_5 (RBR2)- 3.24 Gbps/Lane + break; + case 4320000: + link_rate = LINK_RATE_RATE_6; // Rate_6 - 4.32 Gbps/Lane + break; + case 5400000: + link_rate = LINK_RATE_HIGH2; // Rate_7 (HBR2)- 5.40 Gbps/Lane + break; + case 6750000: + link_rate = LINK_RATE_RATE_8; // Rate_8 - 6.75 Gbps/Lane + break; + case 8100000: + link_rate = LINK_RATE_HIGH3; // Rate_9 (HBR3)- 8.10 Gbps/Lane + break; + default: + link_rate = LINK_RATE_UNKNOWN; + break; + } + return link_rate; +} + +static union dp_cable_id intersect_cable_id( + union dp_cable_id *a, union dp_cable_id *b) +{ + union dp_cable_id out; + + out.bits.UHBR10_20_CAPABILITY = MIN(a->bits.UHBR10_20_CAPABILITY, + b->bits.UHBR10_20_CAPABILITY); + out.bits.UHBR13_5_CAPABILITY = MIN(a->bits.UHBR13_5_CAPABILITY, + b->bits.UHBR13_5_CAPABILITY); + out.bits.CABLE_TYPE = MAX(a->bits.CABLE_TYPE, b->bits.CABLE_TYPE); + + return out; +} + +/* + * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw. + */ +static uint32_t intersect_frl_link_bw_support( + const uint32_t max_supported_frl_bw_in_kbps, + const union hdmi_encoded_link_bw hdmi_encoded_link_bw) +{ + uint32_t supported_bw_in_kbps = max_supported_frl_bw_in_kbps; + + // HDMI_ENCODED_LINK_BW bits are only valid if HDMI Link Configuration bit is 1 (FRL mode) + if (hdmi_encoded_link_bw.bits.FRL_MODE) { + if (hdmi_encoded_link_bw.bits.BW_48Gbps) + supported_bw_in_kbps = 48000000; + else if (hdmi_encoded_link_bw.bits.BW_40Gbps) + supported_bw_in_kbps = 40000000; + else if (hdmi_encoded_link_bw.bits.BW_32Gbps) + supported_bw_in_kbps = 32000000; + else if (hdmi_encoded_link_bw.bits.BW_24Gbps) + supported_bw_in_kbps = 24000000; + else if (hdmi_encoded_link_bw.bits.BW_18Gbps) + supported_bw_in_kbps = 18000000; + else if (hdmi_encoded_link_bw.bits.BW_9Gbps) + supported_bw_in_kbps = 9000000; + } + + return supported_bw_in_kbps; +} + +static enum clock_source_id get_clock_source_id(struct dc_link *link) +{ + enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_UNDEFINED; + struct clock_source *dp_cs = link->dc->res_pool->dp_clock_source; + + if (dp_cs != NULL) { + dp_cs_id = dp_cs->id; + } else { + /* + * dp clock source is not initialized for some reason. + * Should not happen, CLOCK_SOURCE_ID_EXTERNAL will be used + */ + ASSERT(dp_cs); + } + + return dp_cs_id; +} + +static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, + int length) +{ + int retry = 0; + + if (!link->dpcd_caps.dpcd_rev.raw) { + do { + dpcd_write_rx_power_ctrl(link, true); + core_link_read_dpcd(link, DP_DPCD_REV, + dpcd_data, length); + link->dpcd_caps.dpcd_rev.raw = dpcd_data[ + DP_DPCD_REV - + DP_DPCD_REV]; + } while (retry++ < 4 && !link->dpcd_caps.dpcd_rev.raw); + } + + if (link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_VGA_CONVERTER) { + switch (link->dpcd_caps.branch_dev_id) { + /* 0010FA active dongles (DP-VGA, DP-DLDVI converters) power down + * all internal circuits including AUX communication preventing + * reading DPCD table and EDID (spec violation). 
+ * Encoder will skip DP RX power down on disable_output to + * keep receiver powered all the time.*/ + case DP_BRANCH_DEVICE_ID_0010FA: + case DP_BRANCH_DEVICE_ID_0080E1: + case DP_BRANCH_DEVICE_ID_00E04C: + link->wa_flags.dp_keep_receiver_powered = true; + break; + + /* TODO: May need work around for other dongles. */ + default: + link->wa_flags.dp_keep_receiver_powered = false; + break; + } + } else + link->wa_flags.dp_keep_receiver_powered = false; +} + +bool dp_is_fec_supported(const struct dc_link *link) +{ + /* TODO - use asic cap instead of link_enc->features + * we no longer know which link enc to use for this link before commit + */ + struct link_encoder *link_enc = NULL; + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + return (dc_is_dp_signal(link->connector_signal) && link_enc && + link_enc->features.fec_supported && + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE); +} + +bool dp_should_enable_fec(const struct dc_link *link) +{ + bool force_disable = false; + + if (link->fec_state == dc_link_fec_enabled) + force_disable = false; + else if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST && + link->local_sink && + link->local_sink->edid_caps.panel_patch.disable_fec) + force_disable = true; + else if (link->connector_signal == SIGNAL_TYPE_EDP + && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields. + dsc_support.DSC_SUPPORT == false + || link->panel_config.dsc.disable_dsc_edp + || !link->dc->caps.edp_dsc_support)) + force_disable = true; + + return !force_disable && dp_is_fec_supported(link); +} + +bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx) +{ + /* If this assert is hit then we have a link encoder dynamic management issue */ + ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true); + return (pipe_ctx->stream_res.hpo_dp_stream_enc && + pipe_ctx->link_res.hpo_dp_link_enc && + dc_is_dp_signal(pipe_ctx->stream->signal)); +} + +bool dp_is_lttpr_present(struct dc_link *link) +{ + return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && + link->dpcd_caps.lttpr_caps.max_lane_count > 0 && + link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && + link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); +} + +/* in DP compliance test, DPR-120 may have + * a random value in its MAX_LINK_BW dpcd field. + * We map it to the maximum supported link rate that + * is smaller than MAX_LINK_BW in this case. 
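	 * For instance, assuming the link-rate codes follow DPCD (HBR2 is
	 * 0x14, HBR3 is 0x1E), a bogus MAX_LINK_BW readback of 0x19 is
	 * clamped down to LINK_RATE_HIGH2 by the helper below.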
+ */ +static enum dc_link_rate get_link_rate_from_max_link_bw( + uint8_t max_link_bw) +{ + enum dc_link_rate link_rate; + + if (max_link_bw >= LINK_RATE_HIGH3) { + link_rate = LINK_RATE_HIGH3; + } else if (max_link_bw < LINK_RATE_HIGH3 + && max_link_bw >= LINK_RATE_HIGH2) { + link_rate = LINK_RATE_HIGH2; + } else if (max_link_bw < LINK_RATE_HIGH2 + && max_link_bw >= LINK_RATE_HIGH) { + link_rate = LINK_RATE_HIGH; + } else if (max_link_bw < LINK_RATE_HIGH + && max_link_bw >= LINK_RATE_LOW) { + link_rate = LINK_RATE_LOW; + } else { + link_rate = LINK_RATE_UNKNOWN; + } + + return link_rate; +} + +static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) +{ + enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; + + if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20) + lttpr_max_link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5) + lttpr_max_link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10) + lttpr_max_link_rate = LINK_RATE_UHBR10; + + return lttpr_max_link_rate; +} + +static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link) +{ + enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN; + + if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) + cable_max_link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) + cable_max_link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) + cable_max_link_rate = LINK_RATE_UHBR10; + + return cable_max_link_rate; +} + +static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count) +{ + return lane_count <= LANE_COUNT_ONE; +} + +static inline bool reached_minimum_link_rate(enum dc_link_rate link_rate) +{ + return link_rate <= LINK_RATE_LOW; +} + +static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count) +{ + switch (lane_count) { + case LANE_COUNT_FOUR: + return LANE_COUNT_TWO; + case LANE_COUNT_TWO: + return LANE_COUNT_ONE; + case LANE_COUNT_ONE: + return LANE_COUNT_UNKNOWN; + default: + return LANE_COUNT_UNKNOWN; + } +} + +static enum dc_link_rate reduce_link_rate(const struct dc_link *link, enum dc_link_rate link_rate) +{ + // NEEDSWORK: provide some details about why this function never returns some of the + // obscure link rates such as 4.32 Gbps or 3.24 Gbps and if such behavior is intended. 
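	// For reference, the fallback chain implemented by the switch below is
	// UHBR20 -> UHBR13.5 -> UHBR10 -> HBR3 -> HBR2 -> HBR -> RBR, with an
	// extra HBR3 -> 6.75 Gbps (LINK_RATE_RATE_8) -> HBR2 detour taken only
	// for eDP links when debug.support_eDP1_5 is set.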
+ // + + switch (link_rate) { + case LINK_RATE_UHBR20: + return LINK_RATE_UHBR13_5; + case LINK_RATE_UHBR13_5: + return LINK_RATE_UHBR10; + case LINK_RATE_UHBR10: + return LINK_RATE_HIGH3; + case LINK_RATE_HIGH3: + if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->debug.support_eDP1_5) + return LINK_RATE_RATE_8; + return LINK_RATE_HIGH2; + case LINK_RATE_RATE_8: + return LINK_RATE_HIGH2; + case LINK_RATE_HIGH2: + return LINK_RATE_HIGH; + case LINK_RATE_RATE_6: + case LINK_RATE_RBR2: + return LINK_RATE_HIGH; + case LINK_RATE_HIGH: + return LINK_RATE_LOW; + case LINK_RATE_RATE_3: + case LINK_RATE_RATE_2: + return LINK_RATE_LOW; + case LINK_RATE_LOW: + default: + return LINK_RATE_UNKNOWN; + } +} + +static enum dc_lane_count increase_lane_count(enum dc_lane_count lane_count) +{ + switch (lane_count) { + case LANE_COUNT_ONE: + return LANE_COUNT_TWO; + case LANE_COUNT_TWO: + return LANE_COUNT_FOUR; + default: + return LANE_COUNT_UNKNOWN; + } +} + +static enum dc_link_rate increase_link_rate(struct dc_link *link, + enum dc_link_rate link_rate) +{ + switch (link_rate) { + case LINK_RATE_LOW: + return LINK_RATE_HIGH; + case LINK_RATE_HIGH: + return LINK_RATE_HIGH2; + case LINK_RATE_HIGH2: + return LINK_RATE_HIGH3; + case LINK_RATE_HIGH3: + return LINK_RATE_UHBR10; + case LINK_RATE_UHBR10: + /* upto DP2.x specs UHBR13.5 is the only link rate that could be + * not supported by DPRX when higher link rate is supported. + * so we treat it as a special case for code simplicity. When we + * have new specs with more link rates like this, we should + * consider a more generic solution to handle discrete link + * rate capabilities. + */ + return link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 ? + LINK_RATE_UHBR13_5 : LINK_RATE_UHBR20; + case LINK_RATE_UHBR13_5: + return LINK_RATE_UHBR20; + default: + return LINK_RATE_UNKNOWN; + } +} + +static bool decide_fallback_link_setting_max_bw_policy( + struct dc_link *link, + const struct dc_link_settings *max, + struct dc_link_settings *cur, + enum link_training_result training_result) +{ + uint8_t cur_idx = 0, next_idx; + bool found = false; + + if (training_result == LINK_TRAINING_ABORT) + return false; + + while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks)) + /* find current index */ + if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count && + dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate) + break; + else + cur_idx++; + + next_idx = cur_idx + 1; + + while (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) + /* find next index */ + if (dp_lt_fallbacks[next_idx].lane_count > max->lane_count || + dp_lt_fallbacks[next_idx].link_rate > max->link_rate) + next_idx++; + else if (dp_lt_fallbacks[next_idx].link_rate == LINK_RATE_UHBR13_5 && + link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5 == 0) + /* upto DP2.x specs UHBR13.5 is the only link rate that + * could be not supported by DPRX when higher link rate + * is supported. so we treat it as a special case for + * code simplicity. When we have new specs with more + * link rates like this, we should consider a more + * generic solution to handle discrete link rate + * capabilities. 
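			 * For example, with a sink that reports UHBR20 but not
			 * UHBR13.5, a failure at {4 lanes, UHBR20} skips the
			 * {4 lanes, UHBR13.5} entry and falls back directly to
			 * {2 lanes, UHBR20}.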
+ */ + next_idx++; + else + break; + + if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) { + cur->lane_count = dp_lt_fallbacks[next_idx].lane_count; + cur->link_rate = dp_lt_fallbacks[next_idx].link_rate; + found = true; + } + + return found; +} + +/* + * function: set link rate and lane count fallback based + * on current link setting and last link training result + * return value: + * true - link setting could be set + * false - has reached minimum setting + * and no further fallback could be done + */ +bool decide_fallback_link_setting( + struct dc_link *link, + struct dc_link_settings *max, + struct dc_link_settings *cur, + enum link_training_result training_result) +{ + if (link_dp_get_encoding_format(max) == DP_128b_132b_ENCODING || + link->dc->debug.force_dp2_lt_fallback_method) + return decide_fallback_link_setting_max_bw_policy(link, max, + cur, training_result); + + switch (training_result) { + case LINK_TRAINING_CR_FAIL_LANE0: + case LINK_TRAINING_CR_FAIL_LANE1: + case LINK_TRAINING_CR_FAIL_LANE23: + case LINK_TRAINING_LQA_FAIL: + { + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(link, cur->link_rate); + } else if (!reached_minimum_lane_count(cur->lane_count)) { + cur->link_rate = max->link_rate; + if (training_result == LINK_TRAINING_CR_FAIL_LANE0) + return false; + else if (training_result == LINK_TRAINING_CR_FAIL_LANE1) + cur->lane_count = LANE_COUNT_ONE; + else if (training_result == LINK_TRAINING_CR_FAIL_LANE23) + cur->lane_count = LANE_COUNT_TWO; + else + cur->lane_count = reduce_lane_count(cur->lane_count); + } else { + return false; + } + break; + } + case LINK_TRAINING_EQ_FAIL_EQ: + case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: + { + if (!reached_minimum_lane_count(cur->lane_count)) { + cur->lane_count = reduce_lane_count(cur->lane_count); + } else if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(link, cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; + } else { + return false; + } + break; + } + case LINK_TRAINING_EQ_FAIL_CR: + { + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(link, cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; + } else { + return false; + } + break; + } + default: + return false; + } + return true; +} +static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_settings *link_setting, uint32_t req_bw) +{ + struct dc_link_settings initial_link_setting = { + LANE_COUNT_ONE, LINK_RATE_LOW, LINK_SPREAD_DISABLED, false, 0}; + struct dc_link_settings current_link_setting = + initial_link_setting; + uint32_t link_bw; + + if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap)) + return false; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. 
could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + link->verified_link_cap.link_rate) { + link_bw = dp_link_bandwidth_kbps( + link, + ¤t_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + current_link_setting.link_rate = + increase_link_rate(link, + current_link_setting.link_rate); + current_link_setting.lane_count = + initial_link_setting.lane_count; + } + } + + return false; +} + +bool edp_decide_link_settings(struct dc_link *link, + struct dc_link_settings *link_setting, uint32_t req_bw) +{ + struct dc_link_settings initial_link_setting; + struct dc_link_settings current_link_setting; + uint32_t link_bw; + + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. + * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if (!edp_is_ilr_optimization_enabled(link)) { + *link_setting = link->verified_link_cap; + return true; + } + + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = true; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + link->verified_link_cap.link_rate) { + link_bw = dp_link_bandwidth_kbps( + link, + ¤t_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + current_link_setting.lane_count = + initial_link_setting.lane_count; + } else + break; + } + } + return false; +} + +bool decide_edp_link_settings_with_dsc(struct dc_link *link, + struct dc_link_settings *link_setting, + uint32_t req_bw, + enum dc_link_rate max_link_rate) +{ + struct dc_link_settings initial_link_setting; + struct dc_link_settings current_link_setting; + uint32_t link_bw; + + unsigned int policy = 0; + + policy = link->panel_config.dsc.force_dsc_edp_policy; + if (max_link_rate == LINK_RATE_UNKNOWN) + max_link_rate = link->verified_link_cap.link_rate; + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. 
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if (!edp_is_ilr_optimization_enabled(link)) { + /* for DSC enabled case, we search for minimum lane count */ + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = LINK_RATE_LOW; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = false; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + if (req_bw > dp_link_bandwidth_kbps(link, &link->verified_link_cap)) + return false; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + max_link_rate) { + link_bw = dp_link_bandwidth_kbps( + link, + ¤t_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + if (policy) { + /* minimize lane */ + if (current_link_setting.link_rate < max_link_rate) { + current_link_setting.link_rate = + increase_link_rate(link, + current_link_setting.link_rate); + } else { + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + current_link_setting.link_rate = initial_link_setting.link_rate; + } else + break; + } + } else { + /* minimize link rate */ + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + current_link_setting.link_rate = + increase_link_rate(link, + current_link_setting.link_rate); + current_link_setting.lane_count = + initial_link_setting.lane_count; + } + } + } + return false; + } + + /* if optimize edp link is supported */ + memset(&initial_link_setting, 0, sizeof(initial_link_setting)); + initial_link_setting.lane_count = LANE_COUNT_ONE; + initial_link_setting.link_rate = link->dpcd_caps.edp_supported_link_rates[0]; + initial_link_setting.link_spread = LINK_SPREAD_DISABLED; + initial_link_setting.use_link_rate_set = true; + initial_link_setting.link_rate_set = 0; + current_link_setting = initial_link_setting; + + /* search for the minimum link setting that: + * 1. is supported according to the link training result + * 2. 
could support the b/w requested by the timing + */ + while (current_link_setting.link_rate <= + max_link_rate) { + link_bw = dp_link_bandwidth_kbps( + link, + ¤t_link_setting); + if (req_bw <= link_bw) { + *link_setting = current_link_setting; + return true; + } + if (policy) { + /* minimize lane */ + if (current_link_setting.link_rate_set < + link->dpcd_caps.edp_supported_link_rates_count + && current_link_setting.link_rate < max_link_rate) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + } else { + if (current_link_setting.lane_count < link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + current_link_setting.link_rate_set = initial_link_setting.link_rate_set; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + } else + break; + } + } else { + /* minimize link rate */ + if (current_link_setting.lane_count < + link->verified_link_cap.lane_count) { + current_link_setting.lane_count = + increase_lane_count( + current_link_setting.lane_count); + } else { + if (current_link_setting.link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + current_link_setting.link_rate_set++; + current_link_setting.link_rate = + link->dpcd_caps.edp_supported_link_rates[current_link_setting.link_rate_set]; + current_link_setting.lane_count = + initial_link_setting.lane_count; + } else + break; + } + } + } + return false; +} + +static bool decide_mst_link_settings(const struct dc_link *link, struct dc_link_settings *link_setting) +{ + *link_setting = link->verified_link_cap; + return true; +} + +bool link_decide_link_settings(struct dc_stream_state *stream, + struct dc_link_settings *link_setting) +{ + struct dc_link *link = stream->link; + uint32_t req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, dc_link_get_highest_encoding_format(link)); + + memset(link_setting, 0, sizeof(*link_setting)); + + /* if preferred is specified through AMDDP, use it, if it's enough + * to drive the mode + */ + if (link->preferred_link_setting.lane_count != + LANE_COUNT_UNKNOWN && + link->preferred_link_setting.link_rate != + LINK_RATE_UNKNOWN) { + *link_setting = link->preferred_link_setting; + return true; + } + + /* MST doesn't perform link training for now + * TODO: add MST specific link training routine + */ + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + decide_mst_link_settings(link, link_setting); + } else if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* enable edp link optimization for DSC eDP case */ + if (stream->timing.flags.DSC) { + enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN; + + if (link->panel_config.dsc.force_dsc_edp_policy) { + /* calculate link max link rate cap*/ + struct dc_link_settings tmp_link_setting; + struct dc_crtc_timing tmp_timing = stream->timing; + uint32_t orig_req_bw; + + tmp_link_setting.link_rate = LINK_RATE_UNKNOWN; + tmp_timing.flags.DSC = 0; + orig_req_bw = dc_bandwidth_in_kbps_from_timing(&tmp_timing, + dc_link_get_highest_encoding_format(link)); + edp_decide_link_settings(link, &tmp_link_setting, orig_req_bw); + max_link_rate = tmp_link_setting.link_rate; + } + decide_edp_link_settings_with_dsc(link, link_setting, req_bw, max_link_rate); + } else { + edp_decide_link_settings(link, link_setting, req_bw); + } + } else { + decide_dp_link_settings(link, link_setting, req_bw); + } + + return 
link_setting->lane_count != LANE_COUNT_UNKNOWN && + link_setting->link_rate != LINK_RATE_UNKNOWN; +} + +enum dp_link_encoding link_dp_get_encoding_format(const struct dc_link_settings *link_settings) +{ + if ((link_settings->link_rate >= LINK_RATE_LOW) && + (link_settings->link_rate <= LINK_RATE_HIGH3)) + return DP_8b_10b_ENCODING; + else if ((link_settings->link_rate >= LINK_RATE_UHBR10) && + (link_settings->link_rate <= LINK_RATE_UHBR20)) + return DP_128b_132b_ENCODING; + return DP_UNKNOWN_ENCODING; +} + +enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link) +{ + struct dc_link_settings link_settings = {0}; + + if (!dc_is_dp_signal(link->connector_signal)) + return DP_UNKNOWN_ENCODING; + + if (link->preferred_link_setting.lane_count != + LANE_COUNT_UNKNOWN && + link->preferred_link_setting.link_rate != + LINK_RATE_UNKNOWN) { + link_settings = link->preferred_link_setting; + } else { + decide_mst_link_settings(link, &link_settings); + } + + return link_dp_get_encoding_format(&link_settings); +} + +static void read_dp_device_vendor_id(struct dc_link *link) +{ + struct dp_device_vendor_id dp_id; + + /* read IEEE branch device id */ + core_link_read_dpcd( + link, + DP_BRANCH_OUI, + (uint8_t *)&dp_id, + sizeof(dp_id)); + + link->dpcd_caps.branch_dev_id = + (dp_id.ieee_oui[0] << 16) + + (dp_id.ieee_oui[1] << 8) + + dp_id.ieee_oui[2]; + + memmove( + link->dpcd_caps.branch_dev_name, + dp_id.ieee_device_id, + sizeof(dp_id.ieee_device_id)); +} + +static enum dc_status wake_up_aux_channel(struct dc_link *link) +{ + enum dc_status status = DC_ERROR_UNEXPECTED; + uint32_t aux_channel_retry_cnt = 0; + uint8_t dpcd_power_state = '\0'; + + while (status != DC_OK && aux_channel_retry_cnt < 10) { + status = core_link_read_dpcd(link, DP_SET_POWER, + &dpcd_power_state, sizeof(dpcd_power_state)); + + /* Delay 1 ms if AUX CH is in power down state. Based on spec + * section 2.3.1.2, if AUX CH may be powered down due to + * write to DPCD 600h = 2. Sink AUX CH is monitoring differential + * signal and may need up to 1 ms before being able to reply. 
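	 * With the 10-iteration loop below this gives the receiver up to
	 * roughly 10 ms in total to wake up; DP_SET_POWER_D0 (01h) and
	 * DP_SET_POWER_D3 (02h) written in the failure path are the two
	 * DPCD 600h power states referred to here.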
+ */ + if (status != DC_OK || dpcd_power_state == DP_SET_POWER_D3) { + fsleep(1000); + aux_channel_retry_cnt++; + } + } + + if (status != DC_OK) { + dpcd_power_state = DP_SET_POWER_D0; + status = core_link_write_dpcd( + link, + DP_SET_POWER, + &dpcd_power_state, + sizeof(dpcd_power_state)); + + dpcd_power_state = DP_SET_POWER_D3; + status = core_link_write_dpcd( + link, + DP_SET_POWER, + &dpcd_power_state, + sizeof(dpcd_power_state)); + DC_LOG_DC("%s: Failed to power up sink\n", __func__); + return DC_ERROR_UNEXPECTED; + } + + return DC_OK; +} + +static void get_active_converter_info( + uint8_t data, struct dc_link *link) +{ + union dp_downstream_port_present ds_port = { .byte = data }; + memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); + + /* decode converter info*/ + if (!ds_port.fields.PORT_PRESENT) { + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; + set_dongle_type(link->ddc, + link->dpcd_caps.dongle_type); + link->dpcd_caps.is_branch_dev = false; + return; + } + + /* DPCD 0x5 bit 0 = 1, it indicate it's branch device */ + link->dpcd_caps.is_branch_dev = ds_port.fields.PORT_PRESENT; + + switch (ds_port.fields.PORT_TYPE) { + case DOWNSTREAM_VGA: + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_VGA_CONVERTER; + break; + case DOWNSTREAM_DVI_HDMI_DP_PLUS_PLUS: + /* At this point we don't know is it DVI or HDMI or DP++, + * assume DVI.*/ + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_DP_DVI_CONVERTER; + break; + default: + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; + break; + } + + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_11) { + uint8_t det_caps[16]; /* CTS 4.2.2.7 expects source to read Detailed Capabilities Info : 00080h-0008F.*/ + union dwnstream_port_caps_byte0 *port_caps = + (union dwnstream_port_caps_byte0 *)det_caps; + if (core_link_read_dpcd(link, DP_DOWNSTREAM_PORT_0, + det_caps, sizeof(det_caps)) == DC_OK) { + + switch (port_caps->bits.DWN_STRM_PORTX_TYPE) { + /*Handle DP case as DONGLE_NONE*/ + case DOWN_STREAM_DETAILED_DP: + link->dpcd_caps.dongle_type = DISPLAY_DONGLE_NONE; + break; + case DOWN_STREAM_DETAILED_VGA: + link->dpcd_caps.dongle_type = + DISPLAY_DONGLE_DP_VGA_CONVERTER; + break; + case DOWN_STREAM_DETAILED_DVI: + link->dpcd_caps.dongle_type = + DISPLAY_DONGLE_DP_DVI_CONVERTER; + break; + case DOWN_STREAM_DETAILED_HDMI: + case DOWN_STREAM_DETAILED_DP_PLUS_PLUS: + /*Handle DP++ active converter case, process DP++ case as HDMI case according DP1.4 spec*/ + link->dpcd_caps.dongle_type = + DISPLAY_DONGLE_DP_HDMI_CONVERTER; + + link->dpcd_caps.dongle_caps.dongle_type = link->dpcd_caps.dongle_type; + if (ds_port.fields.DETAILED_CAPS) { + + union dwnstream_port_caps_byte3_hdmi + hdmi_caps = {.raw = det_caps[3] }; + union dwnstream_port_caps_byte2 + hdmi_color_caps = {.raw = det_caps[2] }; + link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz = + det_caps[1] * 2500; + + link->dpcd_caps.dongle_caps.is_dp_hdmi_s3d_converter = + hdmi_caps.bits.FRAME_SEQ_TO_FRAME_PACK; + /*YCBCR capability only for HDMI case*/ + if (port_caps->bits.DWN_STRM_PORTX_TYPE + == DOWN_STREAM_DETAILED_HDMI) { + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_pass_through = + hdmi_caps.bits.YCrCr422_PASS_THROUGH; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_pass_through = + hdmi_caps.bits.YCrCr420_PASS_THROUGH; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr422_converter = + hdmi_caps.bits.YCrCr422_CONVERSION; + link->dpcd_caps.dongle_caps.is_dp_hdmi_ycbcr420_converter = + hdmi_caps.bits.YCrCr420_CONVERSION; + } + + 
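				/* Per the detailed caps read above: det_caps[1] is the max
				 * TMDS character clock in 2.5 MHz units (hence the * 2500
				 * conversion to kHz, e.g. 0x78 -> 300000 kHz), det_caps[2]
				 * carries the color depth caps used just below and
				 * det_caps[3] the HDMI conversion/pass-through bits parsed
				 * above.
				 */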
link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc = + translate_dpcd_max_bpc( + hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT); + + if (link->dc->caps.dp_hdmi21_pcon_support) { + union hdmi_encoded_link_bw hdmi_encoded_link_bw; + + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = + link_bw_kbps_from_raw_frl_link_rate_data( + hdmi_color_caps.bits.MAX_ENCODED_LINK_BW_SUPPORT); + + // Intersect reported max link bw support with the supported link rate post FRL link training + if (core_link_read_dpcd(link, DP_PCON_HDMI_POST_FRL_STATUS, + &hdmi_encoded_link_bw.raw, sizeof(hdmi_encoded_link_bw)) == DC_OK) { + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps = intersect_frl_link_bw_support( + link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps, + hdmi_encoded_link_bw); + } + + if (link->dpcd_caps.dongle_caps.dp_hdmi_frl_max_link_bw_in_kbps > 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; + } + + if (link->dpcd_caps.dongle_caps.dp_hdmi_max_pixel_clk_in_khz != 0) + link->dpcd_caps.dongle_caps.extendedCapValid = true; + } + + break; + } + } + } + + set_dongle_type(link->ddc, link->dpcd_caps.dongle_type); + + { + struct dp_sink_hw_fw_revision dp_hw_fw_revision; + + core_link_read_dpcd( + link, + DP_BRANCH_REVISION_START, + (uint8_t *)&dp_hw_fw_revision, + sizeof(dp_hw_fw_revision)); + + link->dpcd_caps.branch_hw_revision = + dp_hw_fw_revision.ieee_hw_rev; + + memmove( + link->dpcd_caps.branch_fw_revision, + dp_hw_fw_revision.ieee_fw_rev, + sizeof(dp_hw_fw_revision.ieee_fw_rev)); + } + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && + link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { + union dp_dfp_cap_ext dfp_cap_ext; + memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext)); + core_link_read_dpcd( + link, + DP_DFP_CAPABILITY_EXTENSION_SUPPORT, + dfp_cap_ext.raw, + sizeof(dfp_cap_ext.raw)); + link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported; + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps = + dfp_cap_ext.fields.max_pixel_rate_in_mps[0] + + (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width = + dfp_cap_ext.fields.max_video_h_active_width[0] + + (dfp_cap_ext.fields.max_video_h_active_width[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height = + dfp_cap_ext.fields.max_video_v_active_height[0] + + (dfp_cap_ext.fields.max_video_v_active_height[1] << 8); + link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps = + dfp_cap_ext.fields.encoding_format_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps = + dfp_cap_ext.fields.rgb_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps = + dfp_cap_ext.fields.ycbcr444_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps = + dfp_cap_ext.fields.ycbcr422_color_depth_caps; + link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps = + dfp_cap_ext.fields.ycbcr420_color_depth_caps; + DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index); + DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? 
"true" : "false"); + DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps); + DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); + DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); + } +} + +static void apply_usbc_combo_phy_reset_wa(struct dc_link *link, + struct dc_link_settings *link_settings) +{ + /* Temporary Renoir-specific workaround PHY will sometimes be in bad + * state on hotplugging display from certain USB-C dongle, so add extra + * cycle of enabling and disabling the PHY before first link training. + */ + struct link_resource link_res = {0}; + enum clock_source_id dp_cs_id = get_clock_source_id(link); + + dp_enable_link_phy(link, &link_res, link->connector_signal, + dp_cs_id, link_settings); + dp_disable_link_phy(link, &link_res, link->connector_signal); +} + +bool dp_overwrite_extended_receiver_cap(struct dc_link *link) +{ + uint8_t dpcd_data[16]; + uint32_t read_dpcd_retry_cnt = 3; + enum dc_status status = DC_ERROR_UNEXPECTED; + union dp_downstream_port_present ds_port = { 0 }; + union down_stream_port_count down_strm_port_count; + union edp_configuration_cap edp_config_cap; + + int i; + + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPCD_REV, + dpcd_data, + sizeof(dpcd_data)); + if (status == DC_OK) + break; + } + + link->dpcd_caps.dpcd_rev.raw = + dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + + if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) + return false; + + ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - + DP_DPCD_REV]; + + get_active_converter_info(ds_port.byte, link); + + down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - + DP_DPCD_REV]; + + link->dpcd_caps.allow_invalid_MSA_timing_param = + down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + + link->dpcd_caps.max_ln_count.raw = dpcd_data[ + DP_MAX_LANE_COUNT - DP_DPCD_REV]; + + link->dpcd_caps.max_down_spread.raw = dpcd_data[ + DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + + link->reported_link_cap.lane_count = + link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; + link->reported_link_cap.link_rate = dpcd_data[ + DP_MAX_LINK_RATE - DP_DPCD_REV]; + link->reported_link_cap.link_spread = + link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + + edp_config_cap.raw = dpcd_data[ + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; + link->dpcd_caps.panel_mode_edp = + edp_config_cap.bits.ALT_SCRAMBLER_RESET; + link->dpcd_caps.dpcd_display_control_capable = + edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; + + return true; +} + +void dpcd_set_source_specific_data(struct dc_link *link) +{ + if (!link->dc->vendor_signature.is_valid) { + enum dc_status __maybe_unused result_write_min_hblank = DC_NOT_SUPPORTED; + struct dpcd_amd_signature amd_signature = {0}; + struct dpcd_amd_device_id amd_device_id = {0}; + + amd_device_id.device_id_byte1 = + (uint8_t)(link->ctx->asic_id.chip_id); + amd_device_id.device_id_byte2 = + (uint8_t)(link->ctx->asic_id.chip_id >> 8); + amd_device_id.dce_version = + (uint8_t)(link->ctx->dce_version); + amd_device_id.dal_version_byte1 = 0x0; // needed? where to get? + amd_device_id.dal_version_byte2 = 0x0; // needed? where to get? 
+ + core_link_read_dpcd(link, DP_SOURCE_OUI, + (uint8_t *)(&amd_signature), + sizeof(amd_signature)); + + if (!((amd_signature.AMD_IEEE_TxSignature_byte1 == 0x0) && + (amd_signature.AMD_IEEE_TxSignature_byte2 == 0x0) && + (amd_signature.AMD_IEEE_TxSignature_byte3 == 0x1A))) { + + amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0; + amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0; + amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A; + + core_link_write_dpcd(link, DP_SOURCE_OUI, + (uint8_t *)(&amd_signature), + sizeof(amd_signature)); + } + + core_link_write_dpcd(link, DP_SOURCE_OUI+0x03, + (uint8_t *)(&amd_device_id), + sizeof(amd_device_id)); + + if (link->ctx->dce_version >= DCN_VERSION_2_0 && + link->dc->caps.min_horizontal_blanking_period != 0) { + + uint8_t hblank_size = (uint8_t)link->dc->caps.min_horizontal_blanking_period; + + result_write_min_hblank = core_link_write_dpcd(link, + DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, (uint8_t *)(&hblank_size), + sizeof(hblank_size)); + } + DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_INFORMATION, + WPP_BIT_FLAG_DC_DETECTION_DP_CAPS, + "result=%u link_index=%u enum dce_version=%d DPCD=0x%04X min_hblank=%u branch_dev_id=0x%x branch_dev_name='%c%c%c%c%c%c'", + result_write_min_hblank, + link->link_index, + link->ctx->dce_version, + DP_SOURCE_MINIMUM_HBLANK_SUPPORTED, + link->dc->caps.min_horizontal_blanking_period, + link->dpcd_caps.branch_dev_id, + link->dpcd_caps.branch_dev_name[0], + link->dpcd_caps.branch_dev_name[1], + link->dpcd_caps.branch_dev_name[2], + link->dpcd_caps.branch_dev_name[3], + link->dpcd_caps.branch_dev_name[4], + link->dpcd_caps.branch_dev_name[5]); + } else { + core_link_write_dpcd(link, DP_SOURCE_OUI, + link->dc->vendor_signature.data.raw, + sizeof(link->dc->vendor_signature.data.raw)); + } +} + +void dpcd_write_cable_id_to_dprx(struct dc_link *link) +{ + if (!link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED || + link->dpcd_caps.cable_id.raw == 0 || + link->dprx_states.cable_id_written) + return; + + core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, + &link->dpcd_caps.cable_id.raw, + sizeof(link->dpcd_caps.cable_id.raw)); + + link->dprx_states.cable_id_written = 1; +} + +static bool get_usbc_cable_id(struct dc_link *link, union dp_cable_id *cable_id) +{ + union dmub_rb_cmd cmd; + + if (!link->ctx->dmub_srv || + link->ep_type != DISPLAY_ENDPOINT_PHY || + link->link_enc->features.flags.bits.DP_IS_USB_C == 0) + return false; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cable_id.header.type = DMUB_CMD_GET_USBC_CABLE_ID; + cmd.cable_id.header.payload_bytes = sizeof(cmd.cable_id.data); + cmd.cable_id.data.input.phy_inst = resource_transmitter_to_phy_idx( + link->dc, link->link_enc->transmitter); + if (dm_execute_dmub_cmd(link->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && + cmd.cable_id.header.ret_status == 1) { + cable_id->raw = cmd.cable_id.data.output_raw; + DC_LOG_DC("usbc_cable_id = %d.\n", cable_id->raw); + } + return cmd.cable_id.header.ret_status == 1; +} + +static void retrieve_cable_id(struct dc_link *link) +{ + union dp_cable_id usbc_cable_id; + + link->dpcd_caps.cable_id.raw = 0; + core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, + &link->dpcd_caps.cable_id.raw, sizeof(uint8_t)); + + if (get_usbc_cable_id(link, &usbc_cable_id)) + link->dpcd_caps.cable_id = intersect_cable_id( + &link->dpcd_caps.cable_id, &usbc_cable_id); +} + +bool read_is_mst_supported(struct dc_link *link) +{ + bool mst = false; + enum dc_status st = DC_OK; + union dpcd_rev rev; + union mstm_cap cap; + + if 
(link->preferred_training_settings.mst_enable && + *link->preferred_training_settings.mst_enable == false) { + return false; + } + + rev.raw = 0; + cap.raw = 0; + + st = core_link_read_dpcd(link, DP_DPCD_REV, &rev.raw, + sizeof(rev)); + + if (st == DC_OK && rev.raw >= DPCD_REV_12) { + + st = core_link_read_dpcd(link, DP_MSTM_CAP, + &cap.raw, sizeof(cap)); + if (st == DC_OK && cap.bits.MST_CAP == 1) + mst = true; + } + return mst; + +} + +/* Read additional sink caps defined in source specific DPCD area + * This function currently only reads from SinkCapability address (DP_SOURCE_SINK_CAP) + * TODO: Add FS caps and read from DP_SOURCE_SINK_FS_CAP as well + */ +static bool dpcd_read_sink_ext_caps(struct dc_link *link) +{ + uint8_t dpcd_data = 0; + uint8_t edp_general_cap2 = 0; + + if (!link) + return false; + + if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK) + return false; + + link->dpcd_sink_ext_caps.raw = dpcd_data; + + if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK) + return false; + + link->dpcd_caps.panel_luminance_control = (edp_general_cap2 & DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) != 0; + + return true; +} + +enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) +{ + uint8_t lttpr_dpcd_data[8]; + enum dc_status status; + bool is_lttpr_present; + + /* Logic to determine LTTPR support*/ + bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; + + if (!vbios_lttpr_interop || !link->dc->caps.extended_aux_timeout_support) + return DC_NOT_SUPPORTED; + + /* By reading LTTPR capability, RX assumes that we will enable + * LTTPR extended aux timeout if LTTPR is present. + */ + status = core_link_read_dpcd( + link, + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, + lttpr_dpcd_data, + sizeof(lttpr_dpcd_data)); + + link->dpcd_caps.lttpr_caps.revision.raw = + lttpr_dpcd_data[DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_link_rate = + lttpr_dpcd_data[DP_MAX_LINK_RATE_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.phy_repeater_cnt = + lttpr_dpcd_data[DP_PHY_REPEATER_CNT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_lane_count = + lttpr_dpcd_data[DP_MAX_LANE_COUNT_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.mode = + lttpr_dpcd_data[DP_PHY_REPEATER_MODE - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.max_ext_timeout = + lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = + lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = + lttpr_dpcd_data[DP_PHY_REPEATER_128B132B_RATES - + DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + + /* If this chip cap is set, at least one retimer must exist in the chain + * Override count to 1 if we receive a known bad count (0 or an invalid value) */ + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { + ASSERT(0); + link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; + DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", 
link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + } + + /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */ + is_lttpr_present = dp_is_lttpr_present(link); + + if (is_lttpr_present) + CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); + + DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present); + return status; +} + +static bool retrieve_link_cap(struct dc_link *link) +{ + /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, + * which means size 16 will be good for both of those DPCD register block reads + */ + uint8_t dpcd_data[16]; + /*Only need to read 1 byte starting from DP_DPRX_FEATURE_ENUMERATION_LIST. + */ + uint8_t dpcd_dprx_data = '\0'; + + struct dp_device_vendor_id sink_id; + union down_stream_port_count down_strm_port_count; + union edp_configuration_cap edp_config_cap; + union dp_downstream_port_present ds_port = { 0 }; + enum dc_status status = DC_ERROR_UNEXPECTED; + uint32_t read_dpcd_retry_cnt = 3; + int i; + struct dp_sink_hw_fw_revision dp_hw_fw_revision; + const uint32_t post_oui_delay = 30; // 30ms + bool is_fec_supported = false; + bool is_dsc_basic_supported = false; + bool is_dsc_passthrough_supported = false; + + memset(dpcd_data, '\0', sizeof(dpcd_data)); + memset(&down_strm_port_count, + '\0', sizeof(union down_stream_port_count)); + memset(&edp_config_cap, '\0', + sizeof(union edp_configuration_cap)); + + /* if extended timeout is supported in hardware, + * default to LTTPR timeout (3.2ms) first as a W/A for DP link layer + * CTS 4.2.1.1 regression introduced by CTS specs requirement update. + */ + try_to_configure_aux_timeout(link->ddc, + LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD); + + status = dp_retrieve_lttpr_cap(link); + + if (status != DC_OK) { + status = wake_up_aux_channel(link); + if (status == DC_OK) + dp_retrieve_lttpr_cap(link); + else + return false; + } + + if (dp_is_lttpr_present(link)) + configure_lttpr_mode_transparent(link); + + /* Read DP tunneling information. 
*/ + status = dpcd_get_tunneling_device_data(link); + + dpcd_set_source_specific_data(link); + /* Sink may need to configure internals based on vendor, so allow some + * time before proceeding with possibly vendor specific transactions + */ + msleep(post_oui_delay); + + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPCD_REV, + dpcd_data, + sizeof(dpcd_data)); + if (status == DC_OK) + break; + } + + + if (status != DC_OK) { + dm_error("%s: Read receiver caps dpcd data failed.\n", __func__); + return false; + } + + if (!dp_is_lttpr_present(link)) + try_to_configure_aux_timeout(link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD); + + + { + union training_aux_rd_interval aux_rd_interval; + + aux_rd_interval.raw = + dpcd_data[DP_TRAINING_AUX_RD_INTERVAL]; + + link->dpcd_caps.ext_receiver_cap_field_present = + aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1; + + if (aux_rd_interval.bits.EXT_RECEIVER_CAP_FIELD_PRESENT == 1) { + uint8_t ext_cap_data[16]; + + memset(ext_cap_data, '\0', sizeof(ext_cap_data)); + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DP13_DPCD_REV, + ext_cap_data, + sizeof(ext_cap_data)); + if (status == DC_OK) { + memcpy(dpcd_data, ext_cap_data, sizeof(dpcd_data)); + break; + } + } + if (status != DC_OK) + dm_error("%s: Read extend caps data failed, use cap from dpcd 0.\n", __func__); + } + } + + link->dpcd_caps.dpcd_rev.raw = + dpcd_data[DP_DPCD_REV - DP_DPCD_REV]; + + if (link->dpcd_caps.ext_receiver_cap_field_present) { + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, + DP_DPRX_FEATURE_ENUMERATION_LIST, + &dpcd_dprx_data, + sizeof(dpcd_dprx_data)); + if (status == DC_OK) + break; + } + + link->dpcd_caps.dprx_feature.raw = dpcd_dprx_data; + + if (status != DC_OK) + dm_error("%s: Read DPRX caps data failed.\n", __func__); + + /* AdaptiveSyncCapability */ + dpcd_dprx_data = 0; + for (i = 0; i < read_dpcd_retry_cnt; i++) { + status = core_link_read_dpcd( + link, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1, + &dpcd_dprx_data, sizeof(dpcd_dprx_data)); + if (status == DC_OK) + break; + } + + link->dpcd_caps.adaptive_sync_caps.dp_adap_sync_caps.raw = dpcd_dprx_data; + + if (status != DC_OK) + dm_error("%s: Read DPRX caps data failed. Addr:%#x\n", + __func__, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1); + } + + else { + link->dpcd_caps.dprx_feature.raw = 0; + } + + + /* Error condition checking... + * It is impossible for Sink to report Max Lane Count = 0. + * It is possible for Sink to report Max Link Rate = 0, if it is + * an eDP device that is reporting specialized link rates in the + * SUPPORTED_LINK_RATE table. 
+ */ + if (dpcd_data[DP_MAX_LANE_COUNT - DP_DPCD_REV] == 0) + return false; + + ds_port.byte = dpcd_data[DP_DOWNSTREAMPORT_PRESENT - + DP_DPCD_REV]; + + read_dp_device_vendor_id(link); + + /* TODO - decouple raw mst capability from policy decision */ + link->dpcd_caps.is_mst_capable = read_is_mst_supported(link); + DC_LOG_DC("%s: MST_Support: %s\n", __func__, str_yes_no(link->dpcd_caps.is_mst_capable)); + + get_active_converter_info(ds_port.byte, link); + + dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); + + down_strm_port_count.raw = dpcd_data[DP_DOWN_STREAM_PORT_COUNT - + DP_DPCD_REV]; + + link->dpcd_caps.allow_invalid_MSA_timing_param = + down_strm_port_count.bits.IGNORE_MSA_TIMING_PARAM; + + link->dpcd_caps.max_ln_count.raw = dpcd_data[ + DP_MAX_LANE_COUNT - DP_DPCD_REV]; + + link->dpcd_caps.max_down_spread.raw = dpcd_data[ + DP_MAX_DOWNSPREAD - DP_DPCD_REV]; + + link->reported_link_cap.lane_count = + link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; + link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw( + dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]); + link->reported_link_cap.link_spread = + link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; + + edp_config_cap.raw = dpcd_data[ + DP_EDP_CONFIGURATION_CAP - DP_DPCD_REV]; + link->dpcd_caps.panel_mode_edp = + edp_config_cap.bits.ALT_SCRAMBLER_RESET; + link->dpcd_caps.dpcd_display_control_capable = + edp_config_cap.bits.DPCD_DISPLAY_CONTROL_CAPABLE; + link->dpcd_caps.channel_coding_cap.raw = + dpcd_data[DP_MAIN_LINK_CHANNEL_CODING - DP_DPCD_REV]; + link->test_pattern_enabled = false; + link->compliance_test_state.raw = 0; + + /* read sink count */ + core_link_read_dpcd(link, + DP_SINK_COUNT, + &link->dpcd_caps.sink_count.raw, + sizeof(link->dpcd_caps.sink_count.raw)); + + /* read sink ieee oui */ + core_link_read_dpcd(link, + DP_SINK_OUI, + (uint8_t *)(&sink_id), + sizeof(sink_id)); + + link->dpcd_caps.sink_dev_id = + (sink_id.ieee_oui[0] << 16) + + (sink_id.ieee_oui[1] << 8) + + (sink_id.ieee_oui[2]); + + memmove( + link->dpcd_caps.sink_dev_id_str, + sink_id.ieee_device_id, + sizeof(sink_id.ieee_device_id)); + + core_link_read_dpcd( + link, + DP_SINK_HW_REVISION_START, + (uint8_t *)&dp_hw_fw_revision, + sizeof(dp_hw_fw_revision)); + + link->dpcd_caps.sink_hw_revision = + dp_hw_fw_revision.ieee_hw_rev; + + memmove( + link->dpcd_caps.sink_fw_revision, + dp_hw_fw_revision.ieee_fw_rev, + sizeof(dp_hw_fw_revision.ieee_fw_rev)); + + /* Quirk for Retina panels: wrong DP_MAX_LINK_RATE */ + { + uint8_t str_mbp_2018[] = { 101, 68, 21, 103, 98, 97 }; + uint8_t fwrev_mbp_2018[] = { 7, 4 }; + uint8_t fwrev_mbp_2018_vega[] = { 8, 4 }; + + /* We also check for the firmware revision as 16,1 models have an + * identical device id and are incorrectly quirked otherwise. 
+ */ + if ((link->dpcd_caps.sink_dev_id == 0x0010fa) && + !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2018, + sizeof(str_mbp_2018)) && + (!memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018, + sizeof(fwrev_mbp_2018)) || + !memcmp(link->dpcd_caps.sink_fw_revision, fwrev_mbp_2018_vega, + sizeof(fwrev_mbp_2018_vega)))) { + link->reported_link_cap.link_rate = LINK_RATE_RBR2; + } + } + + memset(&link->dpcd_caps.dsc_caps, '\0', + sizeof(link->dpcd_caps.dsc_caps)); + memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); + /* Read DSC and FEC sink capabilities if DP revision is 1.4 and up */ + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14) { + status = core_link_read_dpcd( + link, + DP_FEC_CAPABILITY, + &link->dpcd_caps.fec_cap.raw, + sizeof(link->dpcd_caps.fec_cap.raw)); + status = core_link_read_dpcd( + link, + DP_DSC_SUPPORT, + link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, + sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); + if (status == DC_OK) { + is_fec_supported = link->dpcd_caps.fec_cap.bits.FEC_CAPABLE; + is_dsc_basic_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT; + is_dsc_passthrough_supported = link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT; + DC_LOG_DC("%s: FEC_Sink_Support: %s\n", __func__, + str_yes_no(is_fec_supported)); + DC_LOG_DC("%s: DSC_Basic_Sink_Support: %s\n", __func__, + str_yes_no(is_dsc_basic_supported)); + DC_LOG_DC("%s: DSC_Passthrough_Sink_Support: %s\n", __func__, + str_yes_no(is_dsc_passthrough_supported)); + } + if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { + status = core_link_read_dpcd( + link, + DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, + sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); + DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index); + DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0); + DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1); + DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", + link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); + } + + /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode + * only if required. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->dc->debug.dpia_debug.bits.enable_force_tbt3_work_around && + link->dpcd_caps.is_branch_dev && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 && + (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE || + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) { + /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA. + * Clear FEC and DSC capabilities as a work around if that is not the case. 
+ */ + link->wa_flags.dpia_forced_tbt3_mode = true; + memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); + memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); + DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index); + } else + link->wa_flags.dpia_forced_tbt3_mode = false; + } + + if (!dpcd_read_sink_ext_caps(link)) + link->dpcd_sink_ext_caps.raw = 0; + + if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index); + + core_link_read_dpcd(link, + DP_128B132B_SUPPORTED_LINK_RATES, + &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw, + sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw)); + if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20) + link->reported_link_cap.link_rate = LINK_RATE_UHBR20; + else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5) + link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10) + link->reported_link_cap.link_rate = LINK_RATE_UHBR10; + else + dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__); + DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index); + DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz", + link->reported_link_cap.link_rate / 100, + link->reported_link_cap.link_rate % 100); + + core_link_read_dpcd(link, + DP_SINK_VIDEO_FALLBACK_FORMATS, + &link->dpcd_caps.fallback_formats.raw, + sizeof(link->dpcd_caps.fallback_formats.raw)); + DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index); + if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support) + DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support) + DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support) + DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported"); + if (link->dpcd_caps.fallback_formats.raw == 0) { + DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported"); + link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1; + } + + core_link_read_dpcd(link, + DP_FEC_CAPABILITY_1, + &link->dpcd_caps.fec_cap1.raw, + sizeof(link->dpcd_caps.fec_cap1.raw)); + DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index); + if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) + DC_LOG_DP2("\tFEC aggregated error counters are supported"); + } + + retrieve_cable_id(link); + dpcd_write_cable_id_to_dprx(link); + + /* Connectivity log: detection */ + CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); + + return true; +} + +bool detect_dp_sink_caps(struct dc_link *link) +{ + return retrieve_link_cap(link); +} + +void detect_edp_sink_caps(struct dc_link *link) +{ + uint8_t supported_link_rates[16]; + uint32_t entry; + uint32_t link_rate_in_khz; + enum dc_link_rate link_rate = LINK_RATE_UNKNOWN; + uint8_t backlight_adj_cap; + uint8_t general_edp_cap; + + retrieve_link_cap(link); + link->dpcd_caps.edp_supported_link_rates_count = 0; + memset(supported_link_rates, 0, sizeof(supported_link_rates)); + + /* + * edp_supported_link_rates_count is only valid for eDP v1.4 or higher. 
+ * Per VESA eDP spec, "The DPCD revision for eDP v1.4 is 13h" + */ + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13) { + // Read DPCD 00010h - 0001Fh 16 bytes at one shot + core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + + for (entry = 0; entry < 16; entry += 2) { + // DPCD register reports per-lane link rate = 16-bit link rate capability + // value X 200 kHz. Need multiplier to find link rate in kHz. + link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + + supported_link_rates[entry]) * 200; + + DC_LOG_DC("%s: eDP v1.4 supported sink rates: [%d] %d kHz\n", __func__, + entry / 2, link_rate_in_khz); + + if (link_rate_in_khz != 0) { + link_rate = linkRateInKHzToLinkRateMultiplier(link_rate_in_khz); + link->dpcd_caps.edp_supported_link_rates[link->dpcd_caps.edp_supported_link_rates_count] = link_rate; + link->dpcd_caps.edp_supported_link_rates_count++; + } + } + } + + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, + &backlight_adj_cap, sizeof(backlight_adj_cap)); + + link->dpcd_caps.dynamic_backlight_capable_edp = + (backlight_adj_cap & DP_EDP_DYNAMIC_BACKLIGHT_CAP) ? true:false; + + core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_1, + &general_edp_cap, sizeof(general_edp_cap)); + + link->dpcd_caps.set_power_state_capable_edp = + (general_edp_cap & DP_EDP_SET_POWER_CAP) ? true:false; + + set_default_brightness_aux(link); + + core_link_read_dpcd(link, DP_EDP_DPCD_REV, + &link->dpcd_caps.edp_rev, + sizeof(link->dpcd_caps.edp_rev)); + /* + * PSR is only valid for eDP v1.3 or higher. + */ + if (link->dpcd_caps.edp_rev >= DP_EDP_13) { + core_link_read_dpcd(link, DP_PSR_SUPPORT, + &link->dpcd_caps.psr_info.psr_version, + sizeof(link->dpcd_caps.psr_info.psr_version)); + if (link->dpcd_caps.sink_dev_id == DP_BRANCH_DEVICE_ID_001CF8) + core_link_read_dpcd(link, DP_FORCE_PSRSU_CAPABILITY, + &link->dpcd_caps.psr_info.force_psrsu_cap, + sizeof(link->dpcd_caps.psr_info.force_psrsu_cap)); + core_link_read_dpcd(link, DP_PSR_CAPS, + &link->dpcd_caps.psr_info.psr_dpcd_caps.raw, + sizeof(link->dpcd_caps.psr_info.psr_dpcd_caps.raw)); + if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) { + core_link_read_dpcd(link, DP_PSR2_SU_Y_GRANULARITY, + &link->dpcd_caps.psr_info.psr2_su_y_granularity_cap, + sizeof(link->dpcd_caps.psr_info.psr2_su_y_granularity_cap)); + } + } + + /* + * ALPM is only valid for eDP v1.4 or higher. 
+ */ + if (link->dpcd_caps.dpcd_rev.raw >= DP_EDP_14) + core_link_read_dpcd(link, DP_RECEIVER_ALPM_CAP, + &link->dpcd_caps.alpm_caps.raw, + sizeof(link->dpcd_caps.alpm_caps.raw)); + + /* + * Read REPLAY info + */ + core_link_read_dpcd(link, DP_SINK_PR_PIXEL_DEVIATION_PER_LINE, + &link->dpcd_caps.pr_info.pixel_deviation_per_line, + sizeof(link->dpcd_caps.pr_info.pixel_deviation_per_line)); + core_link_read_dpcd(link, DP_SINK_PR_MAX_NUMBER_OF_DEVIATION_LINE, + &link->dpcd_caps.pr_info.max_deviation_line, + sizeof(link->dpcd_caps.pr_info.max_deviation_line)); +} + +bool dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) +{ + struct link_encoder *link_enc = NULL; + + if (!max_link_enc_cap) { + DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__); + return false; + } + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + if (link_enc && link_enc->funcs->get_max_link_cap) { + link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap); + return true; + } + + DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__); + max_link_enc_cap->lane_count = 1; + max_link_enc_cap->link_rate = 6; + return false; +} + +const struct dc_link_settings *dp_get_verified_link_cap( + const struct dc_link *link) +{ + if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN && + link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) + return &link->preferred_link_setting; + return &link->verified_link_cap; +} + +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) +{ + struct dc_link_settings max_link_cap = {0}; + enum dc_link_rate lttpr_max_link_rate; + enum dc_link_rate cable_max_link_rate; + struct link_encoder *link_enc = NULL; + + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + /* get max link encoder capability */ + if (link_enc) + link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); + + /* Lower link settings based on sink's link cap */ + if (link->reported_link_cap.lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = + link->reported_link_cap.lane_count; + if (link->reported_link_cap.link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = + link->reported_link_cap.link_rate; + if (link->reported_link_cap.link_spread < + max_link_cap.link_spread) + max_link_cap.link_spread = + link->reported_link_cap.link_spread; + + /* Lower link settings based on cable attributes + * Cable ID is a DP2 feature to identify max certified link rate that + * a cable can carry. The cable identification method requires both + * cable and display hardware support. Since the specs comes late, it is + * anticipated that the first round of DP2 cables and displays may not + * be fully compatible to reliably return cable ID data. Therefore the + * decision of our cable id policy is that if the cable can return non + * zero cable id data, we will take cable's link rate capability into + * account. However if we get zero data, the cable link rate capability + * is considered inconclusive. In this case, we will not take cable's + * capability into account to avoid of over limiting hardware capability + * from users. The max overall link rate capability is still determined + * after actual dp pre-training. Cable id is considered as an auxiliary + * method of determining max link bandwidth capability. 
+ */ + cable_max_link_rate = get_cable_max_link_rate(link); + + if (!link->dc->debug.ignore_cable_id && + cable_max_link_rate != LINK_RATE_UNKNOWN && + cable_max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = cable_max_link_rate; + + /* account for lttpr repeaters cap + * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). + */ + if (dp_is_lttpr_present(link)) { + if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) + max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; + lttpr_max_link_rate = get_lttpr_max_link_rate(link); + + if (lttpr_max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = lttpr_max_link_rate; + + DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", + __func__, + max_link_cap.lane_count, + max_link_cap.link_rate); + } + + if (link_dp_get_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING && + link->dc->debug.disable_uhbr) + max_link_cap.link_rate = LINK_RATE_HIGH3; + + return max_link_cap; +} + +static bool dp_verify_link_cap( + struct dc_link *link, + struct dc_link_settings *known_limit_link_setting, + int *fail_count) +{ + struct dc_link_settings cur_link_settings = {0}; + struct dc_link_settings max_link_settings = *known_limit_link_setting; + bool success = false; + bool skip_video_pattern; + enum clock_source_id dp_cs_id = get_clock_source_id(link); + enum link_training_result status = LINK_TRAINING_SUCCESS; + union hpd_irq_data irq_data; + struct link_resource link_res; + + memset(&irq_data, 0, sizeof(irq_data)); + cur_link_settings = max_link_settings; + + /* Grant extended timeout request */ + if (dp_is_lttpr_present(link) && link->dpcd_caps.lttpr_caps.max_ext_timeout > 0) { + uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80; + + core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); + } + + do { + if (!get_temp_dp_link_res(link, &link_res, &cur_link_settings)) + continue; + + skip_video_pattern = cur_link_settings.link_rate != LINK_RATE_LOW; + dp_enable_link_phy( + link, + &link_res, + link->connector_signal, + dp_cs_id, + &cur_link_settings); + + status = dp_perform_link_training( + link, + &link_res, + &cur_link_settings, + skip_video_pattern); + + if (status == LINK_TRAINING_SUCCESS) { + success = true; + fsleep(1000); + if (dp_read_hpd_rx_irq_data(link, &irq_data) == DC_OK && + dp_parse_link_loss_status( + link, + &irq_data)) + (*fail_count)++; + } else if (status == LINK_TRAINING_LINK_LOSS) { + success = true; + (*fail_count)++; + } else { + (*fail_count)++; + } + dp_trace_lt_total_count_increment(link, true); + dp_trace_lt_result_update(link, status, true); + dp_disable_link_phy(link, &link_res, link->connector_signal); + } while (!success && decide_fallback_link_setting(link, + &max_link_settings, &cur_link_settings, status)); + + link->verified_link_cap = success ? 
+ cur_link_settings : fail_safe_link_settings; + return success; +} + +bool dp_verify_link_cap_with_retries( + struct dc_link *link, + struct dc_link_settings *known_limit_link_setting, + int attempts) +{ + int i = 0; + bool success = false; + int fail_count = 0; + struct dc_link_settings last_verified_link_cap = fail_safe_link_settings; + + dp_trace_detect_lt_init(link); + + if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C && + link->dc->debug.usbc_combo_phy_reset_wa) + apply_usbc_combo_phy_reset_wa(link, known_limit_link_setting); + + dp_trace_set_lt_start_timestamp(link, false); + for (i = 0; i < attempts; i++) { + enum dc_connection_type type = dc_connection_none; + + memset(&link->verified_link_cap, 0, + sizeof(struct dc_link_settings)); + if (!link_detect_connection_type(link, &type) || type == dc_connection_none) { + link->verified_link_cap = fail_safe_link_settings; + break; + } else if (dp_verify_link_cap(link, known_limit_link_setting, &fail_count)) { + last_verified_link_cap = link->verified_link_cap; + if (fail_count == 0) { + success = true; + break; + } + } else { + link->verified_link_cap = last_verified_link_cap; + } + fsleep(10 * 1000); + } + + dp_trace_lt_fail_count_update(link, fail_count, true); + dp_trace_set_lt_end_timestamp(link, true); + + return success; +} + +/* + * Check if there is a native DP or passive DP-HDMI dongle connected + */ +bool dp_is_sink_present(struct dc_link *link) +{ + enum gpio_result gpio_result; + uint32_t clock_pin = 0; + uint8_t retry = 0; + struct ddc *ddc; + + enum connector_id connector_id = + dal_graphics_object_id_get_connector_id(link->link_id); + + bool present = + ((connector_id == CONNECTOR_ID_DISPLAY_PORT) || + (connector_id == CONNECTOR_ID_EDP) || + (connector_id == CONNECTOR_ID_USBC)); + + ddc = get_ddc_pin(link->ddc); + + if (!ddc) { + BREAK_TO_DEBUGGER(); + return present; + } + + /* Open GPIO and set it to I2C mode */ + /* Note: this GpioMode_Input will be converted + * to GpioConfigType_I2cAuxDualMode in GPIO component, + * which indicates we need additional delay + */ + + if (dal_ddc_open(ddc, GPIO_MODE_INPUT, + GPIO_DDC_CONFIG_TYPE_MODE_I2C) != GPIO_RESULT_OK) { + dal_ddc_close(ddc); + + return present; + } + + /* + * Read GPIO: DP sink is present if both clock and data pins are zero + * + * [W/A] plug-unplug DP cable, sometimes customer board has + * one short pulse on clk_pin(1V, < 1ms). DP will be config to HDMI/DVI + * then monitor can't br light up. Add retry 3 times + * But in real passive dongle, it need additional 3ms to detect + */ + do { + gpio_result = dal_gpio_get_value(ddc->pin_clock, &clock_pin); + ASSERT(gpio_result == GPIO_RESULT_OK); + if (clock_pin) + fsleep(1000); + else + break; + } while (retry++ < 3); + + present = (gpio_result == GPIO_RESULT_OK) && !clock_pin; + + dal_ddc_close(ddc); + + return present; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h new file mode 100644 index 0000000000..8f0ce97f23 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.h @@ -0,0 +1,107 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DP_CAPABILITY_H__ +#define __DC_LINK_DP_CAPABILITY_H__ + +#include "link.h" + +bool detect_dp_sink_caps(struct dc_link *link); + +void detect_edp_sink_caps(struct dc_link *link); + +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link); + +bool dp_get_max_link_enc_cap(const struct dc_link *link, + struct dc_link_settings *max_link_enc_cap); + +const struct dc_link_settings *dp_get_verified_link_cap( + const struct dc_link *link); + +enum dp_link_encoding link_dp_get_encoding_format( + const struct dc_link_settings *link_settings); + +enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link); + +/* Convert PHY repeater count read from DPCD uint8_t. */ +uint8_t dp_parse_lttpr_repeater_count(uint8_t lttpr_repeater_count); + +bool dp_is_sink_present(struct dc_link *link); + +bool dp_is_lttpr_present(struct dc_link *link); + +bool dp_is_fec_supported(const struct dc_link *link); + +bool is_dp_active_dongle(const struct dc_link *link); + +bool is_dp_branch_device(const struct dc_link *link); + +void dpcd_write_cable_id_to_dprx(struct dc_link *link); + +bool dp_should_enable_fec(const struct dc_link *link); + +bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx); + +/* Initialize output parameter lt_settings. 
*/ +void dp_decide_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_setting, + struct link_training_settings *lt_settings); + +bool link_decide_link_settings( + struct dc_stream_state *stream, + struct dc_link_settings *link_setting); + +bool edp_decide_link_settings(struct dc_link *link, + struct dc_link_settings *link_setting, uint32_t req_bw); + +bool decide_edp_link_settings_with_dsc(struct dc_link *link, + struct dc_link_settings *link_setting, + uint32_t req_bw, + enum dc_link_rate max_link_rate); + +enum dp_link_encoding mst_decide_link_encoding_format(const struct dc_link *link); + +void dpcd_set_source_specific_data(struct dc_link *link); + +/*query dpcd for version and mst cap addresses*/ +bool read_is_mst_supported(struct dc_link *link); + +bool decide_fallback_link_setting( + struct dc_link *link, + struct dc_link_settings *max, + struct dc_link_settings *cur, + enum link_training_result training_result); + +bool dp_verify_link_cap_with_retries( + struct dc_link *link, + struct dc_link_settings *known_limit_link_setting, + int attempts); + +uint32_t link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw); + +bool dp_overwrite_extended_receiver_cap(struct dc_link *link); + +#endif /* __DC_LINK_DP_CAPABILITY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c new file mode 100644 index 0000000000..0bb7491339 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dc.h" +#include "inc/core_status.h" +#include "dpcd_defs.h" + +#include "link_dp_dpia.h" +#include "link_hwss.h" +#include "dm_helpers.h" +#include "dmub/inc/dmub_cmd.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "dc_dmub_srv.h" + +#define DC_LOGGER \ + link->ctx->logger + +/** @note Can remove once DP tunneling registers in upstream include/drm/drm_dp_helper.h */ +/* DPCD DP Tunneling over USB4 */ +#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d +#define DP_IN_ADAPTER_INFO 0xe000e +#define DP_USB4_DRIVER_ID 0xe000f +#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b + +enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) +{ + enum dc_status status = DC_OK; + uint8_t dpcd_dp_tun_data[3] = {0}; + uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0}; + uint8_t i = 0; + + status = core_link_read_dpcd( + link, + DP_TUNNELING_CAPABILITIES_SUPPORT, + dpcd_dp_tun_data, + sizeof(dpcd_dp_tun_data)); + + status = core_link_read_dpcd( + link, + DP_USB4_ROUTER_TOPOLOGY_ID, + dpcd_topology_data, + sizeof(dpcd_topology_data)); + + link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw = + dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT - DP_TUNNELING_CAPABILITIES_SUPPORT]; + link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw = + dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT]; + link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id = + dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT]; + + for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++) + link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i]; + + return status; +} + +bool dpia_query_hpd_status(struct dc_link *link) +{ + union dmub_rb_cmd cmd = {0}; + struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv; + bool is_hpd_high = false; + + /* prepare QUERY_HPD command */ + cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE; + cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1; + cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA; + + /* Return HPD status reported by DMUB if query successfully executed. */ + if (dm_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) && cmd.query_hpd.data.status == AUX_RET_SUCCESS) + is_hpd_high = cmd.query_hpd.data.result; + + DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n", + __func__, + link->link_index, + link->link_id.enum_id - ENUM_ID_1, + cmd.query_hpd.data.status, + cmd.query_hpd.data.result); + + return is_hpd_high; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h new file mode 100644 index 0000000000..363f45a1a9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DPIA_H__ +#define __DC_LINK_DPIA_H__ + +#include "link.h" + +/* Read tunneling device capability from DPCD and update link capability + * accordingly. + */ +enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link); + +/* Query hot plug status of USB4 DP tunnel. + * Returns true if HPD high. + */ +bool dpia_query_hpd_status(struct dc_link *link); +#endif /* __DC_LINK_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c new file mode 100644 index 0000000000..7581023daa --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -0,0 +1,492 @@ + +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +/*********************************************************************/ +// USB4 DPIA BANDWIDTH ALLOCATION LOGIC +/*********************************************************************/ +#include "link_dp_dpia_bw.h" +#include "link_dpcd.h" +#include "dc_dmub_srv.h" + +#define DC_LOGGER \ + link->ctx->logger + +#define Kbps_TO_Gbps (1000 * 1000) + +// ------------------------------------------------------------------ +// PRIVATE FUNCTIONS +// ------------------------------------------------------------------ +/* + * Always Check the following: + * - Is it USB4 link? + * - Is HPD HIGH? + * - Is BW Allocation Support Mode enabled on DP-Tx? 
+ */ +static bool get_bw_alloc_proceed_flag(struct dc_link *tmp) +{ + return (tmp && DISPLAY_ENDPOINT_USB4_DPIA == tmp->ep_type + && tmp->hpd_status + && tmp->dpia_bw_alloc_config.bw_alloc_enabled); +} +static void reset_bw_alloc_struct(struct dc_link *link) +{ + link->dpia_bw_alloc_config.bw_alloc_enabled = false; + link->dpia_bw_alloc_config.sink_verified_bw = 0; + link->dpia_bw_alloc_config.sink_max_bw = 0; + link->dpia_bw_alloc_config.estimated_bw = 0; + link->dpia_bw_alloc_config.bw_granularity = 0; + link->dpia_bw_alloc_config.response_ready = false; +} +static uint8_t get_bw_granularity(struct dc_link *link) +{ + uint8_t bw_granularity = 0; + + core_link_read_dpcd( + link, + DP_BW_GRANULALITY, + &bw_granularity, + sizeof(uint8_t)); + + switch (bw_granularity & 0x3) { + case 0: + bw_granularity = 4; + break; + case 1: + default: + bw_granularity = 2; + break; + } + + return bw_granularity; +} +static int get_estimated_bw(struct dc_link *link) +{ + uint8_t bw_estimated_bw = 0; + + core_link_read_dpcd( + link, + ESTIMATED_BW, + &bw_estimated_bw, + sizeof(uint8_t)); + + return bw_estimated_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); +} +static bool allocate_usb4_bw(int *stream_allocated_bw, int bw_needed, struct dc_link *link) +{ + if (bw_needed > 0) + *stream_allocated_bw += bw_needed; + + return true; +} +static bool deallocate_usb4_bw(int *stream_allocated_bw, int bw_to_dealloc, struct dc_link *link) +{ + bool ret = false; + + if (*stream_allocated_bw > 0) { + *stream_allocated_bw -= bw_to_dealloc; + ret = true; + } else { + //Do nothing for now + ret = true; + } + + // Unplug so reset values + if (!link->hpd_status) + reset_bw_alloc_struct(link); + + return ret; +} +/* + * Read all New BW alloc configuration ex: estimated_bw, allocated_bw, + * granuality, Driver_ID, CM_Group, & populate the BW allocation structs + * for host router and dpia + */ +static void init_usb4_bw_struct(struct dc_link *link) +{ + // Init the known values + link->dpia_bw_alloc_config.bw_granularity = get_bw_granularity(link); + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); +} +static uint8_t get_lowest_dpia_index(struct dc_link *link) +{ + const struct dc *dc_struct = link->dc; + uint8_t idx = 0xFF; + int i; + + for (i = 0; i < MAX_PIPES * 2; ++i) { + + if (!dc_struct->links[i] || + dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + continue; + + if (idx > dc_struct->links[i]->link_index) + idx = dc_struct->links[i]->link_index; + } + + return idx; +} +/* + * Get the Max Available BW or Max Estimated BW for each Host Router + * + * @link: pointer to the dc_link struct instance + * @type: ESTIMATD BW or MAX AVAILABLE BW + * + * return: response_ready flag from dc_link struct + */ +static int get_host_router_total_bw(struct dc_link *link, uint8_t type) +{ + const struct dc *dc_struct = link->dc; + uint8_t lowest_dpia_index = get_lowest_dpia_index(link); + uint8_t idx = (link->link_index - lowest_dpia_index) / 2, idx_temp = 0; + struct dc_link *link_temp; + int total_bw = 0; + int i; + + for (i = 0; i < MAX_PIPES * 2; ++i) { + + if (!dc_struct->links[i] || dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + continue; + + link_temp = dc_struct->links[i]; + if (!link_temp || !link_temp->hpd_status) + continue; + + idx_temp = (link_temp->link_index - lowest_dpia_index) / 2; + + if (idx_temp == idx) { + + if (type == HOST_ROUTER_BW_ESTIMATED) + total_bw += link_temp->dpia_bw_alloc_config.estimated_bw; + else if (type == HOST_ROUTER_BW_ALLOCATED) + 
total_bw += link_temp->dpia_bw_alloc_config.sink_allocated_bw; + } + } + + return total_bw; +} +/* + * Cleanup function for when the dpia is unplugged to reset struct + * and perform any required clean up + * + * @link: pointer to the dc_link struct instance + * + * return: none + */ +static bool dpia_bw_alloc_unplug(struct dc_link *link) +{ + if (!link) + return true; + + return deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, + link->dpia_bw_alloc_config.sink_allocated_bw, link); +} +static void set_usb4_req_bw_req(struct dc_link *link, int req_bw) +{ + uint8_t requested_bw; + uint32_t temp; + + // 1. Add check for this corner case #1 + if (req_bw > link->dpia_bw_alloc_config.estimated_bw) + req_bw = link->dpia_bw_alloc_config.estimated_bw; + + temp = req_bw * link->dpia_bw_alloc_config.bw_granularity; + requested_bw = temp / Kbps_TO_Gbps; + + // Always make sure to add more to account for floating points + if (temp % Kbps_TO_Gbps) + ++requested_bw; + + // 2. Add check for this corner case #2 + req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + if (req_bw == link->dpia_bw_alloc_config.sink_allocated_bw) + return; + + if (core_link_write_dpcd( + link, + REQUESTED_BW, + &requested_bw, + sizeof(uint8_t)) == DC_OK) + link->dpia_bw_alloc_config.response_ready = false; // Reset flag +} +/* + * Return the response_ready flag from dc_link struct + * + * @link: pointer to the dc_link struct instance + * + * return: response_ready flag from dc_link struct + */ +static bool get_cm_response_ready_flag(struct dc_link *link) +{ + return link->dpia_bw_alloc_config.response_ready; +} +// ------------------------------------------------------------------ +// PUBLIC FUNCTIONS +// ------------------------------------------------------------------ +bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link) +{ + bool ret = false; + uint8_t response = 0, + bw_support_dpia = 0, + bw_support_cm = 0; + + if (!(link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->hpd_status)) + goto out; + + if (core_link_read_dpcd( + link, + DP_TUNNELING_CAPABILITIES, + &response, + sizeof(uint8_t)) == DC_OK) + bw_support_dpia = (response >> 7) & 1; + + if (core_link_read_dpcd( + link, + USB4_DRIVER_BW_CAPABILITY, + &response, + sizeof(uint8_t)) == DC_OK) + bw_support_cm = (response >> 7) & 1; + + /* Send request acknowledgment to Turn ON DPTX support */ + if (bw_support_cm && bw_support_dpia) { + + response = 0x80; + if (core_link_write_dpcd( + link, + DPTX_BW_ALLOCATION_MODE_CONTROL, + &response, + sizeof(uint8_t)) != DC_OK) { + DC_LOG_DEBUG("%s: **** FAILURE Enabling DPtx BW Allocation Mode Support ***\n", + __func__); + } else { + // SUCCESS Enabled DPtx BW Allocation Mode Support + link->dpia_bw_alloc_config.bw_alloc_enabled = true; + DC_LOG_DEBUG("%s: **** SUCCESS Enabling DPtx BW Allocation Mode Support ***\n", + __func__); + + ret = true; + init_usb4_bw_struct(link); + } + } + +out: + return ret; +} +void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result) +{ + int bw_needed = 0; + int estimated = 0; + int host_router_total_estimated_bw = 0; + + if (!get_bw_alloc_proceed_flag((link))) + return; + + switch (result) { + + case DPIA_BW_REQ_FAILED: + + DC_LOG_DEBUG("%s: *** *** BW REQ FAILURE for DP-TX Request *** ***\n", __func__); + + // Update the new Estimated BW value updated by CM + link->dpia_bw_alloc_config.estimated_bw = + bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + + set_usb4_req_bw_req(link, 
link->dpia_bw_alloc_config.estimated_bw); + link->dpia_bw_alloc_config.response_ready = false; + + /* + * If FAIL then it is either: + * 1. Due to DP-Tx trying to allocate more than available i.e. it failed locally + * => get estimated and allocate that + * 2. Due to the fact that DP-Tx tried to allocated ESTIMATED BW and failed then + * CM will have to update 0xE0023 with new ESTIMATED BW value. + */ + break; + + case DPIA_BW_REQ_SUCCESS: + + DC_LOG_DEBUG("%s: *** BW REQ SUCCESS for DP-TX Request ***\n", __func__); + + // 1. SUCCESS 1st time before any Pruning is done + // 2. SUCCESS after prev. FAIL before any Pruning is done + // 3. SUCCESS after Pruning is done but before enabling link + + bw_needed = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + + // 1. + if (!link->dpia_bw_alloc_config.sink_allocated_bw) { + + allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, bw_needed, link); + link->dpia_bw_alloc_config.sink_verified_bw = + link->dpia_bw_alloc_config.sink_allocated_bw; + + // SUCCESS from first attempt + if (link->dpia_bw_alloc_config.sink_allocated_bw > + link->dpia_bw_alloc_config.sink_max_bw) + link->dpia_bw_alloc_config.sink_verified_bw = + link->dpia_bw_alloc_config.sink_max_bw; + } + // 3. + else if (link->dpia_bw_alloc_config.sink_allocated_bw) { + + // Find out how much do we need to de-alloc + if (link->dpia_bw_alloc_config.sink_allocated_bw > bw_needed) + deallocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, + link->dpia_bw_alloc_config.sink_allocated_bw - bw_needed, link); + else + allocate_usb4_bw(&link->dpia_bw_alloc_config.sink_allocated_bw, + bw_needed - link->dpia_bw_alloc_config.sink_allocated_bw, link); + } + + // 4. If this is the 2nd sink then any unused bw will be reallocated to master DPIA + // => check if estimated_bw changed + + link->dpia_bw_alloc_config.response_ready = true; + break; + + case DPIA_EST_BW_CHANGED: + + DC_LOG_DEBUG("%s: *** ESTIMATED BW CHANGED for DP-TX Request ***\n", __func__); + + estimated = bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + host_router_total_estimated_bw = get_host_router_total_bw(link, HOST_ROUTER_BW_ESTIMATED); + + // 1. If due to unplug of other sink + if (estimated == host_router_total_estimated_bw) { + // First update the estimated & max_bw fields + if (link->dpia_bw_alloc_config.estimated_bw < estimated) + link->dpia_bw_alloc_config.estimated_bw = estimated; + } + // 2. If due to realloc bw btw 2 dpia due to plug OR realloc unused Bw + else { + // We lost estimated bw usually due to plug event of other dpia + link->dpia_bw_alloc_config.estimated_bw = estimated; + } + break; + + case DPIA_BW_ALLOC_CAPS_CHANGED: + + DC_LOG_DEBUG("%s: *** BW ALLOC CAPABILITY CHANGED for DP-TX Request ***\n", __func__); + link->dpia_bw_alloc_config.bw_alloc_enabled = false; + break; + } +} +int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw) +{ + int ret = 0; + uint8_t timeout = 10; + + if (!(link && DISPLAY_ENDPOINT_USB4_DPIA == link->ep_type + && link->dpia_bw_alloc_config.bw_alloc_enabled)) + goto out; + + //1. 
Hot Plug + if (link->hpd_status && peak_bw > 0) { + + // If DP over USB4 then we need to check BW allocation + link->dpia_bw_alloc_config.sink_max_bw = peak_bw; + set_usb4_req_bw_req(link, link->dpia_bw_alloc_config.sink_max_bw); + + do { + if (timeout > 0) + timeout--; + else + break; + fsleep(10 * 1000); + } while (!get_cm_response_ready_flag(link)); + + if (!timeout) + ret = 0;// ERROR TIMEOUT waiting for response for allocating bw + else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0) + ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED); + } + //2. Cold Unplug + else if (!link->hpd_status) + dpia_bw_alloc_unplug(link); + +out: + return ret; +} +int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw) +{ + int ret = 0; + uint8_t timeout = 10; + + if (!get_bw_alloc_proceed_flag(link)) + goto out; + + /* + * Sometimes stream uses same timing parameters as the already + * allocated max sink bw so no need to re-alloc + */ + if (req_bw != link->dpia_bw_alloc_config.sink_allocated_bw) { + set_usb4_req_bw_req(link, req_bw); + do { + if (timeout > 0) + timeout--; + else + break; + udelay(10 * 1000); + } while (!get_cm_response_ready_flag(link)); + + if (!timeout) + ret = 0;// ERROR TIMEOUT waiting for response for allocating bw + else if (link->dpia_bw_alloc_config.sink_allocated_bw > 0) + ret = get_host_router_total_bw(link, HOST_ROUTER_BW_ALLOCATED); + } + +out: + return ret; +} +bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias) +{ + bool ret = true; + int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }; + uint8_t lowest_dpia_index = 0, dpia_index = 0; + uint8_t i; + + if (!num_dpias || num_dpias > MAX_DPIA_NUM) + return ret; + + //Get total Host Router BW & Validate against each Host Router max BW + for (i = 0; i < num_dpias; ++i) { + + if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled) + continue; + + lowest_dpia_index = get_lowest_dpia_index(link[i]); + if (link[i]->link_index < lowest_dpia_index) + continue; + + dpia_index = (link[i]->link_index - lowest_dpia_index) / 2; + bw_needed_per_hr[dpia_index] += bw_needed_per_dpia[i]; + if (bw_needed_per_hr[dpia_index] > get_host_router_total_bw(link[i], HOST_ROUTER_BW_ALLOCATED)) { + + ret = false; + break; + } + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h new file mode 100644 index 0000000000..7292690383 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h @@ -0,0 +1,102 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DC_INC_LINK_DP_DPIA_BW_H_ +#define DC_INC_LINK_DP_DPIA_BW_H_ + +#include "link.h" + +/* Number of Host Routers per motherboard is 2 */ +#define MAX_HR_NUM 2 +/* Number of DPIA per host router is 2 */ +#define MAX_DPIA_NUM (MAX_HR_NUM * 2) + +/* + * Host Router BW type + */ +enum bw_type { + HOST_ROUTER_BW_ESTIMATED, + HOST_ROUTER_BW_ALLOCATED, + HOST_ROUTER_BW_INVALID, +}; + +/* + * Enable BW Allocation Mode Support from the DP-Tx side + * + * @link: pointer to the dc_link struct instance + * + * return: SUCCESS or FAILURE + */ +bool link_dp_dpia_set_dptx_usb4_bw_alloc_support(struct dc_link *link); + +/* + * Allocates only what the stream needs for bw, so if: + * If (stream_req_bw < or > already_allocated_bw_at_HPD) + * => Deallocate Max Bw & then allocate only what the stream needs + * + * @link: pointer to the dc_link struct instance + * @req_bw: Bw requested by the stream + * + * return: allocated bw else return 0 + */ +int link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw); + +/* + * Handle the USB4 BW Allocation related functionality here: + * Plug => Try to allocate max bw from timing parameters supported by the sink + * Unplug => de-allocate bw + * + * @link: pointer to the dc_link struct instance + * @peak_bw: Peak bw used by the link/sink + * + * return: allocated bw else return 0 + */ +int dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw); + +/* + * Handle function for when the status of the Request above is complete. + * We will find out the result of allocating on CM and update structs. + * + * @link: pointer to the dc_link struct instance + * @bw: Allocated or Estimated BW depending on the result + * @result: Response type + * + * return: none + */ +void dpia_handle_bw_alloc_response(struct dc_link *link, uint8_t bw, uint8_t result); + +/* + * Handle the validation of total BW here and confirm that the bw used by each + * DPIA doesn't exceed available BW for each host router (HR) + * + * @link[]: array of link pointer to all possible DPIA links + * @bw_needed[]: bw needed for each DPIA link based on timing + * @num_dpias: Number of DPIAs for the above 2 arrays. Should always be <= MAX_DPIA_NUM + * + * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE + */ +bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias); + +#endif /* DC_INC_LINK_DP_DPIA_BW_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c new file mode 100644 index 0000000000..e047bbeaa4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c @@ -0,0 +1,490 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements DP HPD short pulse handling sequence according to DP + * specifications + * + */ + +#include "link_dp_irq_handler.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "link_dp_capability.h" +#include "link_edp_panel_control.h" +#include "link/accessories/link_dp_trace.h" +#include "link/link_dpms.h" +#include "dm_helpers.h" + +#define DC_LOGGER_INIT(logger) + +bool dp_parse_link_loss_status( + struct dc_link *link, + union hpd_irq_data *hpd_irq_dpcd_data) +{ + uint8_t irq_reg_rx_power_state = 0; + enum dc_status dpcd_result = DC_ERROR_UNEXPECTED; + union lane_status lane_status; + uint32_t lane; + bool sink_status_changed; + bool return_code; + + sink_status_changed = false; + return_code = false; + + if (link->cur_link_settings.lane_count == 0) + return return_code; + + /*1. Check that Link Status changed, before re-training.*/ + + /*parse lane status*/ + for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { + /* check status of lanes 0,1 + * changed DpcdAddress_Lane01Status (0x202) + */ + lane_status.raw = dp_get_nibble_at_index( + &hpd_irq_dpcd_data->bytes.lane01_status.raw, + lane); + + if (!lane_status.bits.CHANNEL_EQ_DONE_0 || + !lane_status.bits.CR_DONE_0 || + !lane_status.bits.SYMBOL_LOCKED_0) { + /* if one of the channel equalization, clock + * recovery or symbol lock is dropped + * consider it as (link has been + * dropped) dp sink status has changed + */ + sink_status_changed = true; + break; + } + } + + /* Check interlane align.*/ + if (link_dp_get_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING && + (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b || + !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b)) { + sink_status_changed = true; + } else if (!hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) { + sink_status_changed = true; + } + + if (sink_status_changed) { + + DC_LOG_HW_HPD_IRQ("%s: Link Status changed.\n", __func__); + + return_code = true; + + /*2. Check that we can handle interrupt: Not in FS DOS, + * Not in "Display Timeout" state, Link is trained. 
+ */ + dpcd_result = core_link_read_dpcd(link, + DP_SET_POWER, + &irq_reg_rx_power_state, + sizeof(irq_reg_rx_power_state)); + + if (dpcd_result != DC_OK) { + DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain power state.\n", + __func__); + } else { + if (irq_reg_rx_power_state != DP_SET_POWER_D0) + return_code = false; + } + } + + return return_code; +} + +static bool handle_hpd_irq_psr_sink(struct dc_link *link) +{ + union dpcd_psr_configuration psr_configuration; + + if (!link->psr_settings.psr_feature_enabled) + return false; + + dm_helpers_dp_read_dpcd( + link->ctx, + link, + 368,/*DpcdAddress_PSR_Enable_Cfg*/ + &psr_configuration.raw, + sizeof(psr_configuration.raw)); + + if (psr_configuration.bits.ENABLE) { + unsigned char dpcdbuf[3] = {0}; + union psr_error_status psr_error_status; + union psr_sink_psr_status psr_sink_psr_status; + + dm_helpers_dp_read_dpcd( + link->ctx, + link, + 0x2006, /*DpcdAddress_PSR_Error_Status*/ + (unsigned char *) dpcdbuf, + sizeof(dpcdbuf)); + + /*DPCD 2006h ERROR STATUS*/ + psr_error_status.raw = dpcdbuf[0]; + /*DPCD 2008h SINK PANEL SELF REFRESH STATUS*/ + psr_sink_psr_status.raw = dpcdbuf[2]; + + if (psr_error_status.bits.LINK_CRC_ERROR || + psr_error_status.bits.RFB_STORAGE_ERROR || + psr_error_status.bits.VSC_SDP_ERROR) { + bool allow_active; + + /* Acknowledge and clear error bits */ + dm_helpers_dp_write_dpcd( + link->ctx, + link, + 8198,/*DpcdAddress_PSR_Error_Status*/ + &psr_error_status.raw, + sizeof(psr_error_status.raw)); + + /* PSR error, disable and re-enable PSR */ + if (link->psr_settings.psr_allow_active) { + allow_active = false; + edp_set_psr_allow_active(link, &allow_active, true, false, NULL); + allow_active = true; + edp_set_psr_allow_active(link, &allow_active, true, false, NULL); + } + + return true; + } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == + PSR_SINK_STATE_ACTIVE_DISPLAY_FROM_SINK_RFB){ + /* No error is detect, PSR is active. + * We should return with IRQ_HPD handled without + * checking for loss of sync since PSR would have + * powered down main link. 
+ */ + return true; + } + } + return false; +} + +static bool handle_hpd_irq_replay_sink(struct dc_link *link) +{ + union dpcd_replay_configuration replay_configuration; + /*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/ + union psr_error_status replay_error_status; + + if (!link->replay_settings.replay_feature_enabled) + return false; + + dm_helpers_dp_read_dpcd( + link->ctx, + link, + DP_SINK_PR_REPLAY_STATUS, + &replay_configuration.raw, + sizeof(replay_configuration.raw)); + + dm_helpers_dp_read_dpcd( + link->ctx, + link, + DP_PSR_ERROR_STATUS, + &replay_error_status.raw, + sizeof(replay_error_status.raw)); + + link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR = + replay_error_status.bits.LINK_CRC_ERROR; + link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR = + replay_configuration.bits.DESYNC_ERROR_STATUS; + link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR = + replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS; + + if (link->replay_settings.config.replay_error_status.bits.LINK_CRC_ERROR || + link->replay_settings.config.replay_error_status.bits.DESYNC_ERROR || + link->replay_settings.config.replay_error_status.bits.STATE_TRANSITION_ERROR) { + bool allow_active; + + /* Acknowledge and clear configuration bits */ + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_SINK_PR_REPLAY_STATUS, + &replay_configuration.raw, + sizeof(replay_configuration.raw)); + + /* Acknowledge and clear error bits */ + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_PSR_ERROR_STATUS,/*DpcdAddress_REPLAY_Error_Status*/ + &replay_error_status.raw, + sizeof(replay_error_status.raw)); + + /* Replay error, disable and re-enable Replay */ + if (link->replay_settings.replay_allow_active) { + allow_active = false; + edp_set_replay_allow_active(link, &allow_active, true, false, NULL); + allow_active = true; + edp_set_replay_allow_active(link, &allow_active, true, false, NULL); + } + } + return true; +} + +void dp_handle_link_loss(struct dc_link *link) +{ + struct pipe_ctx *pipes[MAX_PIPES]; + struct dc_state *state = link->dc->current_state; + uint8_t count; + int i; + + link_get_master_pipes_with_dpms_on(link, state, &count, pipes); + + for (i = 0; i < count; i++) + link_set_dpms_off(pipes[i]); + + for (i = count - 1; i >= 0; i--) { + // Always use max settings here for DP 1.4a LL Compliance CTS + if (link->is_automated) { + pipes[i]->link_config.dp_link_settings.lane_count = + link->verified_link_cap.lane_count; + pipes[i]->link_config.dp_link_settings.link_rate = + link->verified_link_cap.link_rate; + pipes[i]->link_config.dp_link_settings.link_spread = + link->verified_link_cap.link_spread; + } + link_set_dpms_on(link->dc->current_state, pipes[i]); + } +} + +static void read_dpcd204h_on_irq_hpd(struct dc_link *link, union hpd_irq_data *irq_data) +{ + enum dc_status retval; + union lane_align_status_updated dpcd_lane_status_updated; + + retval = core_link_read_dpcd( + link, + DP_LANE_ALIGN_STATUS_UPDATED, + &dpcd_lane_status_updated.raw, + sizeof(union lane_align_status_updated)); + + if (retval == DC_OK) { + irq_data->bytes.lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b = + dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b; + irq_data->bytes.lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b = + dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b; + } +} + +enum dc_status dp_read_hpd_rx_irq_data( + struct dc_link *link, + union hpd_irq_data *irq_data) +{ + static enum 
dc_status retval; + + /* The HW reads 16 bytes from 200h on HPD, + * but if we get an AUX_DEFER, the HW cannot retry + * and this causes the CTS tests 4.3.2.1 - 3.2.4 to + * fail, so we now explicitly read 6 bytes which is + * the req from the above mentioned test cases. + * + * For DP 1.4 we need to read those from 2002h range. + */ + if (link->dpcd_caps.dpcd_rev.raw < DPCD_REV_14) + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT, + irq_data->raw, + sizeof(union hpd_irq_data)); + else { + /* Read 14 bytes in a single read and then copy only the required fields. + * This is more efficient than doing it in two separate AUX reads. */ + + uint8_t tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI + 1]; + + retval = core_link_read_dpcd( + link, + DP_SINK_COUNT_ESI, + tmp, + sizeof(tmp)); + + if (retval != DC_OK) + return retval; + + irq_data->bytes.sink_cnt.raw = tmp[DP_SINK_COUNT_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.device_service_irq.raw = tmp[DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0 - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane01_status.raw = tmp[DP_LANE0_1_STATUS_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane23_status.raw = tmp[DP_LANE2_3_STATUS_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.lane_status_updated.raw = tmp[DP_LANE_ALIGN_STATUS_UPDATED_ESI - DP_SINK_COUNT_ESI]; + irq_data->bytes.sink_status.raw = tmp[DP_SINK_STATUS_ESI - DP_SINK_COUNT_ESI]; + + /* + * This display doesn't have correct values in DPCD200Eh. + * Read and check DPCD204h instead. + */ + if (link->wa_flags.read_dpcd204h_on_irq_hpd) + read_dpcd204h_on_irq_hpd(link, irq_data); + } + + return retval; +} + +/*************************Short Pulse IRQ***************************/ +bool dp_should_allow_hpd_rx_irq(const struct dc_link *link) +{ + /* + * Don't handle RX IRQ unless one of following is met: + * 1) The link is established (cur_link_settings != unknown) + * 2) We know we're dealing with a branch device, SST or MST + */ + + if ((link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || + is_dp_branch_device(link)) + return true; + + return false; +} + +bool dp_handle_hpd_rx_irq(struct dc_link *link, + union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, + bool defer_handling, bool *has_left_work) +{ + union hpd_irq_data hpd_irq_dpcd_data = {0}; + union device_service_irq device_service_clear = {0}; + enum dc_status result; + bool status = false; + + if (out_link_loss) + *out_link_loss = false; + + if (has_left_work) + *has_left_work = false; + /* For use cases related to down stream connection status change, + * PSR and device auto test, refer to function handle_sst_hpd_irq + * in DAL2.1*/ + + DC_LOG_HW_HPD_IRQ("%s: Got short pulse HPD on link %d\n", + __func__, link->link_index); + + + /* All the "handle_hpd_irq_xxx()" methods + * should be called only after + * dal_dpsst_ls_read_hpd_irq_data + * Order of calls is important too + */ + result = dp_read_hpd_rx_irq_data(link, &hpd_irq_dpcd_data); + if (out_hpd_irq_dpcd_data) + *out_hpd_irq_dpcd_data = hpd_irq_dpcd_data; + + if (result != DC_OK) { + DC_LOG_HW_HPD_IRQ("%s: DPCD read failed to obtain irq data\n", + __func__); + return false; + } + + if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { + // Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC + link->is_automated = true; + device_service_clear.bits.AUTOMATED_TEST = 1; + core_link_write_dpcd( + link, + DP_DEVICE_SERVICE_IRQ_VECTOR, + &device_service_clear.raw, + sizeof(device_service_clear.raw)); + device_service_clear.raw = 0; + if 
(defer_handling && has_left_work) + *has_left_work = true; + else + dc_link_dp_handle_automated_test(link); + return false; + } + + if (!dp_should_allow_hpd_rx_irq(link)) { + DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n", + __func__, link->link_index); + return false; + } + + if (handle_hpd_irq_psr_sink(link)) + /* PSR-related error was detected and handled */ + return true; + + if (handle_hpd_irq_replay_sink(link)) + /* Replay-related error was detected and handled */ + return true; + + /* If PSR-related error handled, Main link may be off, + * so do not handle as a normal sink status change interrupt. + */ + + if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) { + if (defer_handling && has_left_work) + *has_left_work = true; + return true; + } + + /* check if we have MST msg and return since we poll for it */ + if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { + if (defer_handling && has_left_work) + *has_left_work = true; + return false; + } + + /* For now we only handle 'Downstream port status' case. + * If we got sink count changed it means + * Downstream port status changed, + * then DM should call DC to do the detection. + * NOTE: Do not handle link loss on eDP since it is internal link*/ + if ((link->connector_signal != SIGNAL_TYPE_EDP) && + dp_parse_link_loss_status( + link, + &hpd_irq_dpcd_data)) { + /* Connectivity log: link loss */ + CONN_DATA_LINK_LOSS(link, + hpd_irq_dpcd_data.raw, + sizeof(hpd_irq_dpcd_data), + "Status: "); + + if (defer_handling && has_left_work) + *has_left_work = true; + else + dp_handle_link_loss(link); + + status = false; + if (out_link_loss) + *out_link_loss = true; + + dp_trace_link_loss_increment(link); + } + + if (link->type == dc_connection_sst_branch && + hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT + != link->dpcd_sink_count) + status = true; + + /* reasons for HPD RX: + * 1. Link Loss - ie Re-train the Link + * 2. MST sideband message + * 3. Automated Test - ie. Internal Commit + * 4. CP (copy protection) - (not interesting for DM???) + * 5. DRR + * 6. Downstream Port status changed + * -ie. Detect - this the only one + * which is interesting for DM because + * it must call dc_link_detect. + */ + return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h new file mode 100644 index 0000000000..ac33730fed --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.h @@ -0,0 +1,41 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DP_IRQ_HANDLER_H__ +#define __DC_LINK_DP_IRQ_HANDLER_H__ + +#include "link.h" +bool dp_parse_link_loss_status( + struct dc_link *link, + union hpd_irq_data *hpd_irq_dpcd_data); +bool dp_should_allow_hpd_rx_irq(const struct dc_link *link); +void dp_handle_link_loss(struct dc_link *link); +enum dc_status dp_read_hpd_rx_irq_data( + struct dc_link *link, + union hpd_irq_data *irq_data); +bool dp_handle_hpd_rx_irq(struct dc_link *link, + union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss, + bool defer_handling, bool *has_left_work); +#endif /* __DC_LINK_DP_IRQ_HANDLER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c new file mode 100644 index 0000000000..0050e0a06c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c @@ -0,0 +1,209 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements basic dp phy functionality such as enable/disable phy + * output and set lane/drive settings. This file is responsible for maintaining + * and update software state representing current phy status such as current + * link settings. + */ + +#include "link_dp_phy.h" +#include "link_dpcd.h" +#include "link_dp_training.h" +#include "link_dp_capability.h" +#include "clk_mgr.h" +#include "resource.h" +#include "link_enc_cfg.h" +#define DC_LOGGER \ + link->ctx->logger + +void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on) +{ + uint8_t state; + + state = on ? 
DP_POWER_STATE_D0 : DP_POWER_STATE_D3; + + if (link->sync_lt_in_progress) + return; + + core_link_write_dpcd(link, DP_SET_POWER, &state, + sizeof(state)); + +} + +void dp_enable_link_phy( + struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings) +{ + link->cur_link_settings = *link_settings; + link->dc->hwss.enable_dp_link_output(link, link_res, signal, + clock_source, link_settings); + dpcd_write_rx_power_ctrl(link, true); +} + +void dp_disable_link_phy(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal) +{ + struct dc *dc = link->ctx->dc; + + if (!link->wa_flags.dp_keep_receiver_powered && + !link->skip_implict_edp_power_control) + dpcd_write_rx_power_ctrl(link, false); + + dc->hwss.disable_link_output(link, link_res, signal); + /* Clear current link setting.*/ + memset(&link->cur_link_settings, 0, + sizeof(link->cur_link_settings)); + + if (dc->clk_mgr->funcs->notify_link_rate_change) + dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); +} + +static inline bool is_immediate_downstream(struct dc_link *link, uint32_t offset) +{ + return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == + offset); +} + +void dp_set_hw_lane_settings( + struct dc_link *link, + const struct link_resource *link_res, + const struct link_training_settings *link_settings, + uint32_t offset) +{ + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + + if ((link_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && + !is_immediate_downstream(link, offset)) + return; + + if (link_hwss->ext.set_dp_lane_settings) + link_hwss->ext.set_dp_lane_settings(link, link_res, + &link_settings->link_settings, + link_settings->hw_lane_settings); + + memmove(link->cur_lane_setting, + link_settings->hw_lane_settings, + sizeof(link->cur_lane_setting)); +} + +void dp_set_drive_settings( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + /* program ASIC PHY settings*/ + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, + lt_settings->dpcd_lane_settings); + + /* Notify DP sink the PHY settings from source */ + dpcd_set_lane_settings(link, lt_settings, DPRX); +} + +enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready) +{ + /* FEC has to be "set ready" before the link training. + * The policy is to always train with FEC + * if the sink supports it and leave it enabled on link. + * If FEC is not supported, disable it. 
+ */ + struct link_encoder *link_enc = NULL; + enum dc_status status = DC_OK; + uint8_t fec_config = 0; + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + if (!dp_should_enable_fec(link)) + return status; + + if (link_enc->funcs->fec_set_ready && + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { + if (ready) { + fec_config = 1; + status = core_link_write_dpcd(link, + DP_FEC_CONFIGURATION, + &fec_config, + sizeof(fec_config)); + if (status == DC_OK) { + link_enc->funcs->fec_set_ready(link_enc, true); + link->fec_state = dc_link_fec_ready; + } else { + link_enc->funcs->fec_set_ready(link_enc, false); + link->fec_state = dc_link_fec_not_ready; + dm_error("dpcd write failed to set fec_ready"); + } + } else if (link->fec_state == dc_link_fec_ready) { + fec_config = 0; + status = core_link_write_dpcd(link, + DP_FEC_CONFIGURATION, + &fec_config, + sizeof(fec_config)); + link_enc->funcs->fec_set_ready(link_enc, false); + link->fec_state = dc_link_fec_not_ready; + } + } + + return status; +} + +void dp_set_fec_enable(struct dc_link *link, bool enable) +{ + struct link_encoder *link_enc = NULL; + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + + if (!dp_should_enable_fec(link)) + return; + + if (link_enc->funcs->fec_set_enable && + link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) { + if (link->fec_state == dc_link_fec_ready && enable) { + /* Accord to DP spec, FEC enable sequence can first + * be transmitted anytime after 1000 LL codes have + * been transmitted on the link after link training + * completion. Using 1 lane RBR should have the maximum + * time for transmitting 1000 LL codes which is 6.173 us. + * So use 7 microseconds delay instead. + */ + udelay(7); + link_enc->funcs->fec_set_enable(link_enc, true); + link->fec_state = dc_link_fec_enabled; + } else if (link->fec_state == dc_link_fec_enabled && !enable) { + link_enc->funcs->fec_set_enable(link_enc, false); + link->fec_state = dc_link_fec_ready; + } + } +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h new file mode 100644 index 0000000000..1eb0619d67 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.h @@ -0,0 +1,59 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_DP_PHY_H__ +#define __DC_LINK_DP_PHY_H__ + +#include "link.h" +void dp_enable_link_phy( + struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal, + enum clock_source_id clock_source, + const struct dc_link_settings *link_settings); + +void dp_disable_link_phy(struct dc_link *link, + const struct link_resource *link_res, + enum signal_type signal); + +void dp_set_hw_lane_settings( + struct dc_link *link, + const struct link_resource *link_res, + const struct link_training_settings *link_settings, + uint32_t offset); + +void dp_set_drive_settings( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +enum dc_status dp_set_fec_ready(struct dc_link *link, + const struct link_resource *link_res, bool ready); + +void dp_set_fec_enable(struct dc_link *link, bool enable); + +void dpcd_write_rx_power_ctrl(struct dc_link *link, bool on); + +#endif /* __DC_LINK_DP_PHY_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c new file mode 100644 index 0000000000..5a0b045189 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c @@ -0,0 +1,1732 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements all generic dp link training helper functions and top + * level generic training sequence. All variations of dp link training sequence + * should be called inside the top level training functions in this file to + * ensure the integrity of our overall training procedure across different types + * of link encoding and back end hardware. 
+ */ +#include "link_dp_training.h" +#include "link_dp_training_8b_10b.h" +#include "link_dp_training_128b_132b.h" +#include "link_dp_training_auxless.h" +#include "link_dp_training_dpia.h" +#include "link_dp_training_fixed_vs_pe_retimer.h" +#include "link_dpcd.h" +#include "link/accessories/link_dp_trace.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" +#include "link_edp_panel_control.h" +#include "link/link_detection.h" +#include "link/link_validation.h" +#include "atomfirmware.h" +#include "link_enc_cfg.h" +#include "resource.h" +#include "dm_helpers.h" + +#define DC_LOGGER \ + link->ctx->logger + +#define POST_LT_ADJ_REQ_LIMIT 6 +#define POST_LT_ADJ_REQ_TIMEOUT 200 +#define LINK_TRAINING_RETRY_DELAY 50 /* ms */ + +void dp_log_training_result( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum link_training_result status) +{ + char *link_rate = "Unknown"; + char *lt_result = "Unknown"; + char *lt_spread = "Disabled"; + + switch (lt_settings->link_settings.link_rate) { + case LINK_RATE_LOW: + link_rate = "RBR"; + break; + case LINK_RATE_RATE_2: + link_rate = "R2"; + break; + case LINK_RATE_RATE_3: + link_rate = "R3"; + break; + case LINK_RATE_HIGH: + link_rate = "HBR"; + break; + case LINK_RATE_RBR2: + link_rate = "RBR2"; + break; + case LINK_RATE_RATE_6: + link_rate = "R6"; + break; + case LINK_RATE_HIGH2: + link_rate = "HBR2"; + break; + case LINK_RATE_RATE_8: + link_rate = "R8"; + break; + case LINK_RATE_HIGH3: + link_rate = "HBR3"; + break; + case LINK_RATE_UHBR10: + link_rate = "UHBR10"; + break; + case LINK_RATE_UHBR13_5: + link_rate = "UHBR13.5"; + break; + case LINK_RATE_UHBR20: + link_rate = "UHBR20"; + break; + default: + break; + } + + switch (status) { + case LINK_TRAINING_SUCCESS: + lt_result = "pass"; + break; + case LINK_TRAINING_CR_FAIL_LANE0: + lt_result = "CR failed lane0"; + break; + case LINK_TRAINING_CR_FAIL_LANE1: + lt_result = "CR failed lane1"; + break; + case LINK_TRAINING_CR_FAIL_LANE23: + lt_result = "CR failed lane23"; + break; + case LINK_TRAINING_EQ_FAIL_CR: + lt_result = "CR failed in EQ"; + break; + case LINK_TRAINING_EQ_FAIL_CR_PARTIAL: + lt_result = "CR failed in EQ partially"; + break; + case LINK_TRAINING_EQ_FAIL_EQ: + lt_result = "EQ failed"; + break; + case LINK_TRAINING_LQA_FAIL: + lt_result = "LQA failed"; + break; + case LINK_TRAINING_LINK_LOSS: + lt_result = "Link loss"; + break; + case DP_128b_132b_LT_FAILED: + lt_result = "LT_FAILED received"; + break; + case DP_128b_132b_MAX_LOOP_COUNT_REACHED: + lt_result = "max loop count reached"; + break; + case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT: + lt_result = "channel EQ timeout"; + break; + case DP_128b_132b_CDS_DONE_TIMEOUT: + lt_result = "CDS timeout"; + break; + default: + break; + } + + switch (lt_settings->link_settings.link_spread) { + case LINK_SPREAD_DISABLED: + lt_spread = "Disabled"; + break; + case LINK_SPREAD_05_DOWNSPREAD_30KHZ: + lt_spread = "0.5% 30KHz"; + break; + case LINK_SPREAD_05_DOWNSPREAD_33KHZ: + lt_spread = "0.5% 33KHz"; + break; + default: + break; + } + + /* Connectivity log: link training */ + + /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ + + CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", + link_rate, + lt_settings->link_settings.lane_count, + lt_result, + lt_settings->hw_lane_settings[0].VOLTAGE_SWING, + lt_settings->hw_lane_settings[0].PRE_EMPHASIS, + lt_spread); +} + +uint8_t dp_initialize_scrambling_data_symbols( + struct dc_link *link, + enum dc_dp_training_pattern pattern) +{ + uint8_t 
disable_scrabled_data_symbols = 0; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + case DP_TRAINING_PATTERN_SEQUENCE_2: + case DP_TRAINING_PATTERN_SEQUENCE_3: + disable_scrabled_data_symbols = 1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + case DP_128b_132b_TPS1: + case DP_128b_132b_TPS2: + disable_scrabled_data_symbols = 0; + break; + default: + ASSERT(0); + DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", + __func__, pattern); + break; + } + return disable_scrabled_data_symbols; +} + +enum dpcd_training_patterns + dp_training_pattern_to_dpcd_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern pattern) +{ + enum dpcd_training_patterns dpcd_tr_pattern = + DPCD_TRAINING_PATTERN_VIDEOIDLE; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS1\n", __func__); + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_2: + DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS2\n", __func__); + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_SEQUENCE_3: + DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS3\n", __func__); + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern TPS4\n", __func__); + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; + break; + case DP_128b_132b_TPS1: + DC_LOG_HW_LINK_TRAINING("%s: Using DP 128b/132b training pattern TPS1\n", __func__); + dpcd_tr_pattern = DPCD_128b_132b_TPS1; + break; + case DP_128b_132b_TPS2: + DC_LOG_HW_LINK_TRAINING("%s: Using DP 128b/132b training pattern TPS2\n", __func__); + dpcd_tr_pattern = DPCD_128b_132b_TPS2; + break; + case DP_128b_132b_TPS2_CDS: + DC_LOG_HW_LINK_TRAINING("%s: Using DP 128b/132b training pattern TPS2 CDS\n", + __func__); + dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; + break; + case DP_TRAINING_PATTERN_VIDEOIDLE: + DC_LOG_HW_LINK_TRAINING("%s: Using DP training pattern videoidle\n", __func__); + dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; + break; + default: + ASSERT(0); + DC_LOG_HW_LINK_TRAINING("%s: Invalid HW Training pattern: %d\n", + __func__, pattern); + break; + } + + return dpcd_tr_pattern; +} + +uint8_t dp_get_nibble_at_index(const uint8_t *buf, + uint32_t index) +{ + uint8_t nibble; + nibble = buf[index / 2]; + + if (index % 2) + nibble >>= 4; + else + nibble &= 0x0F; + + return nibble; +} + +void dp_wait_for_training_aux_rd_interval( + struct dc_link *link, + uint32_t wait_in_micro_secs) +{ + fsleep(wait_in_micro_secs); + + DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", + __func__, + wait_in_micro_secs); +} + +/* maximum pre emphasis level allowed for each voltage swing level*/ +static const enum dc_pre_emphasis voltage_swing_to_pre_emphasis[] = { + PRE_EMPHASIS_LEVEL3, + PRE_EMPHASIS_LEVEL2, + PRE_EMPHASIS_LEVEL1, + PRE_EMPHASIS_DISABLED }; + +static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing( + enum dc_voltage_swing voltage) +{ + enum dc_pre_emphasis pre_emphasis; + pre_emphasis = PRE_EMPHASIS_MAX_LEVEL; + + if (voltage <= VOLTAGE_SWING_MAX_LEVEL) + pre_emphasis = voltage_swing_to_pre_emphasis[voltage]; + + return pre_emphasis; + +} + +static void maximize_lane_settings(const struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + uint32_t lane; + struct dc_lane_settings max_requested; + + max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; + 
max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; + max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; + + /* Determine what the maximum of the requested settings are*/ + for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { + if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING) + max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING; + + if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) + max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; + if (lane_settings[lane].FFE_PRESET.settings.level > + max_requested.FFE_PRESET.settings.level) + max_requested.FFE_PRESET.settings.level = + lane_settings[lane].FFE_PRESET.settings.level; + } + + /* make sure the requested settings are + * not higher than maximum settings*/ + if (max_requested.VOLTAGE_SWING > VOLTAGE_SWING_MAX_LEVEL) + max_requested.VOLTAGE_SWING = VOLTAGE_SWING_MAX_LEVEL; + + if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) + max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; + if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) + max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; + + /* make sure the pre-emphasis matches the voltage swing*/ + if (max_requested.PRE_EMPHASIS > + get_max_pre_emphasis_for_voltage_swing( + max_requested.VOLTAGE_SWING)) + max_requested.PRE_EMPHASIS = + get_max_pre_emphasis_for_voltage_swing( + max_requested.VOLTAGE_SWING); + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; + lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; + lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; + } +} + +void dp_hw_to_dpcd_lane_settings( + const struct link_training_settings *lt_settings, + const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]) +{ + uint8_t lane = 0; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) { + dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = + (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING); + dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET = + (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS); + dpcd_lane_settings[lane].bits.MAX_SWING_REACHED = + (hw_lane_settings[lane].VOLTAGE_SWING == + VOLTAGE_SWING_MAX_LEVEL ? 1 : 0); + dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED = + (hw_lane_settings[lane].PRE_EMPHASIS == + PRE_EMPHASIS_MAX_LEVEL ? 
1 : 0); + } else if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_128b_132b_ENCODING) { + dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE = + hw_lane_settings[lane].FFE_PRESET.settings.level; + } + } +} + +uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) +{ + uint8_t link_rate = 0; + enum dp_link_encoding encoding = link_dp_get_encoding_format(link_settings); + + if (encoding == DP_128b_132b_ENCODING) + switch (link_settings->link_rate) { + case LINK_RATE_UHBR10: + link_rate = 0x1; + break; + case LINK_RATE_UHBR20: + link_rate = 0x2; + break; + case LINK_RATE_UHBR13_5: + link_rate = 0x4; + break; + default: + link_rate = 0; + break; + } + else if (encoding == DP_8b_10b_ENCODING) + link_rate = (uint8_t) link_settings->link_rate; + else + link_rate = 0; + + return link_rate; +} + +/* Only used for channel equalization */ +uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval) +{ + unsigned int aux_rd_interval_us = 400; + + switch (dpcd_aux_read_interval) { + case 0x01: + aux_rd_interval_us = 4000; + break; + case 0x02: + aux_rd_interval_us = 8000; + break; + case 0x03: + aux_rd_interval_us = 12000; + break; + case 0x04: + aux_rd_interval_us = 16000; + break; + case 0x05: + aux_rd_interval_us = 32000; + break; + case 0x06: + aux_rd_interval_us = 64000; + break; + default: + break; + } + + return aux_rd_interval_us; +} + +enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + enum link_training_result result = LINK_TRAINING_SUCCESS; + + if (ln_count >= LANE_COUNT_ONE && !dpcd_lane_status[0].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE0; + else if (ln_count >= LANE_COUNT_TWO && !dpcd_lane_status[1].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE1; + else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[2].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE23; + else if (ln_count >= LANE_COUNT_FOUR && !dpcd_lane_status[3].bits.CR_DONE_0) + result = LINK_TRAINING_CR_FAIL_LANE23; + return result; +} + +bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset) +{ + return (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (offset != 0); +} + +bool dp_is_max_vs_reached( + const struct link_training_settings *lt_settings) +{ + uint32_t lane; + for (lane = 0; lane < + (uint32_t)(lt_settings->link_settings.lane_count); + lane++) { + if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET + == VOLTAGE_SWING_MAX_LEVEL) + return true; + } + return false; + +} + +bool dp_is_cr_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + bool done = true; + uint32_t lane; + /*LANEx_CR_DONE bits All 1's?*/ + for (lane = 0; lane < (uint32_t)(ln_count); lane++) { + if (!dpcd_lane_status[lane].bits.CR_DONE_0) + done = false; + } + return done; + +} + +bool dp_is_ch_eq_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + bool done = true; + uint32_t lane; + for (lane = 0; lane < (uint32_t)(ln_count); lane++) + if (!dpcd_lane_status[lane].bits.CHANNEL_EQ_DONE_0) + done = false; + return done; +} + +bool dp_is_symbol_locked(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status) +{ + bool locked = true; + uint32_t lane; + for (lane = 0; lane < (uint32_t)(ln_count); lane++) + if (!dpcd_lane_status[lane].bits.SYMBOL_LOCKED_0) + locked = false; + return locked; +} + +bool dp_is_interlane_aligned(union lane_align_status_updated align_status) +{ + return 
align_status.bits.INTERLANE_ALIGN_DONE == 1; +} + +enum link_training_result dp_check_link_loss_status( + struct dc_link *link, + const struct link_training_settings *link_training_setting) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + union lane_status lane_status; + uint8_t dpcd_buf[6] = {0}; + uint32_t lane; + + core_link_read_dpcd( + link, + DP_SINK_COUNT, + (uint8_t *)(dpcd_buf), + sizeof(dpcd_buf)); + + /*parse lane status*/ + for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) { + /* + * check lanes status + */ + lane_status.raw = dp_get_nibble_at_index(&dpcd_buf[2], lane); + + if (!lane_status.bits.CHANNEL_EQ_DONE_0 || + !lane_status.bits.CR_DONE_0 || + !lane_status.bits.SYMBOL_LOCKED_0) { + /* if one of the channel equalization, clock + * recovery or symbol lock is dropped + * consider it as (link has been + * dropped) dp sink status has changed + */ + status = LINK_TRAINING_LINK_LOSS; + break; + } + } + + return status; +} + +enum dc_status dp_get_lane_status_and_lane_adjust( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + union lane_status ln_status[LANE_COUNT_DP_MAX], + union lane_align_status_updated *ln_align, + union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + uint32_t offset) +{ + unsigned int lane01_status_address = DP_LANE0_1_STATUS; + uint8_t lane_adjust_offset = 4; + unsigned int lane01_adjust_address; + uint8_t dpcd_buf[6] = {0}; + uint32_t lane; + enum dc_status status; + + if (is_repeater(link_training_setting, offset)) { + lane01_status_address = + DP_LANE0_1_STATUS_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + lane_adjust_offset = 3; + } + + status = core_link_read_dpcd( + link, + lane01_status_address, + (uint8_t *)(dpcd_buf), + sizeof(dpcd_buf)); + + if (status != DC_OK) { + DC_LOG_HW_LINK_TRAINING("%s:\n Failed to read from address 0x%X," + " keep current lane status and lane adjust unchanged", + __func__, + lane01_status_address); + return status; + } + + for (lane = 0; lane < + (uint32_t)(link_training_setting->link_settings.lane_count); + lane++) { + + ln_status[lane].raw = + dp_get_nibble_at_index(&dpcd_buf[0], lane); + ln_adjust[lane].raw = + dp_get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane); + } + + ln_align->raw = dpcd_buf[2]; + + if (is_repeater(link_training_setting, offset)) { + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + offset, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + offset, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } else { + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01Status = %x\n 0x%X Lane23Status = %x\n ", + __func__, + lane01_status_address, dpcd_buf[0], + lane01_status_address + 1, dpcd_buf[1]); + + lane01_adjust_address = DP_ADJUST_REQUEST_LANE0_1; + + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X Lane01AdjustRequest = %x\n 0x%X Lane23AdjustRequest = %x\n", + __func__, + lane01_adjust_address, + dpcd_buf[lane_adjust_offset], + lane01_adjust_address + 1, + dpcd_buf[lane_adjust_offset + 1]); + } + + return status; +} + +static void override_lane_settings(const 
struct link_training_settings *lt_settings, + struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]) +{ + uint32_t lane; + + if (lt_settings->voltage_swing == NULL && + lt_settings->pre_emphasis == NULL && + lt_settings->ffe_preset == NULL && + lt_settings->post_cursor2 == NULL) + + return; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (lt_settings->voltage_swing) + lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; + if (lt_settings->pre_emphasis) + lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; + if (lt_settings->post_cursor2) + lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; + if (lt_settings->ffe_preset) + lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; + } +} + +void dp_get_lttpr_mode_override(struct dc_link *link, enum lttpr_mode *override) +{ + if (!dp_is_lttpr_present(link)) + return; + + if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_TRANSPARENT) { + *override = LTTPR_MODE_TRANSPARENT; + } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_TRANSPARENT) { + *override = LTTPR_MODE_NON_TRANSPARENT; + } else if (link->dc->debug.lttpr_mode_override == LTTPR_MODE_NON_LTTPR) { + *override = LTTPR_MODE_NON_LTTPR; + } + DC_LOG_DC("lttpr_mode_override chose LTTPR_MODE = %d\n", (uint8_t)(*override)); +} + +void override_training_settings( + struct dc_link *link, + const struct dc_link_training_overrides *overrides, + struct link_training_settings *lt_settings) +{ + uint32_t lane; + + /* Override link spread */ + if (!link->dp_ss_off && overrides->downspread != NULL) + lt_settings->link_settings.link_spread = *overrides->downspread ? + LINK_SPREAD_05_DOWNSPREAD_30KHZ + : LINK_SPREAD_DISABLED; + + /* Override lane settings */ + if (overrides->voltage_swing != NULL) + lt_settings->voltage_swing = overrides->voltage_swing; + if (overrides->pre_emphasis != NULL) + lt_settings->pre_emphasis = overrides->pre_emphasis; + if (overrides->post_cursor2 != NULL) + lt_settings->post_cursor2 = overrides->post_cursor2; + if (overrides->ffe_preset != NULL) + lt_settings->ffe_preset = overrides->ffe_preset; + /* Override HW lane settings with BIOS forced values if present */ + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && + lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) { + lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING; + lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS; + lt_settings->always_match_dpcd_with_hw_lane_settings = false; + } + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = + lt_settings->voltage_swing != NULL ? + *lt_settings->voltage_swing : + VOLTAGE_SWING_LEVEL0; + lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = + lt_settings->pre_emphasis != NULL ? + *lt_settings->pre_emphasis + : PRE_EMPHASIS_DISABLED; + lt_settings->hw_lane_settings[lane].POST_CURSOR2 = + lt_settings->post_cursor2 != NULL ? 
+ *lt_settings->post_cursor2 + : POST_CURSOR2_DISABLED; + } + + if (lt_settings->always_match_dpcd_with_hw_lane_settings) + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + + /* Override training timings */ + if (overrides->cr_pattern_time != NULL) + lt_settings->cr_pattern_time = *overrides->cr_pattern_time; + if (overrides->eq_pattern_time != NULL) + lt_settings->eq_pattern_time = *overrides->eq_pattern_time; + if (overrides->pattern_for_cr != NULL) + lt_settings->pattern_for_cr = *overrides->pattern_for_cr; + if (overrides->pattern_for_eq != NULL) + lt_settings->pattern_for_eq = *overrides->pattern_for_eq; + if (overrides->enhanced_framing != NULL) + lt_settings->enhanced_framing = *overrides->enhanced_framing; + if (link->preferred_training_settings.fec_enable != NULL) + lt_settings->should_set_fec_ready = *link->preferred_training_settings.fec_enable; + + /* Check DP tunnel LTTPR mode debug option. */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->dc->debug.dpia_debug.bits.force_non_lttpr) + lt_settings->lttpr_mode = LTTPR_MODE_NON_LTTPR; + + dp_get_lttpr_mode_override(link, <_settings->lttpr_mode); + +} + +enum dc_dp_training_pattern decide_cr_training_pattern( + const struct dc_link_settings *link_settings) +{ + switch (link_dp_get_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + default: + return DP_TRAINING_PATTERN_SEQUENCE_1; + case DP_128b_132b_ENCODING: + return DP_128b_132b_TPS1; + } +} + +enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + struct link_encoder *link_enc; + struct encoder_feature_support *enc_caps; + struct dpcd_caps *rx_caps = &link->dpcd_caps; + enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + + link_enc = link_enc_cfg_get_link_enc(link); + ASSERT(link_enc); + enc_caps = &link_enc->features; + + switch (link_dp_get_encoding_format(link_settings)) { + case DP_8b_10b_ENCODING: + if (enc_caps->flags.bits.IS_TPS4_CAPABLE && + rx_caps->max_down_spread.bits.TPS4_SUPPORTED) + pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + else if (enc_caps->flags.bits.IS_TPS3_CAPABLE && + rx_caps->max_ln_count.bits.TPS3_SUPPORTED) + pattern = DP_TRAINING_PATTERN_SEQUENCE_3; + else + pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + break; + case DP_128b_132b_ENCODING: + pattern = DP_128b_132b_TPS2; + break; + default: + pattern = DP_TRAINING_PATTERN_SEQUENCE_2; + break; + } + return pattern; +} + +enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, + struct dc_link_settings *link_setting) +{ + enum dp_link_encoding encoding = link_dp_get_encoding_format(link_setting); + + if (encoding == DP_8b_10b_ENCODING) + return dp_decide_8b_10b_lttpr_mode(link); + else if (encoding == DP_128b_132b_ENCODING) + return dp_decide_128b_132b_lttpr_mode(link); + + ASSERT(0); + return LTTPR_MODE_NON_LTTPR; +} + +void dp_decide_lane_settings( + const struct link_training_settings *lt_settings, + const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane *dpcd_lane_settings) +{ + uint32_t lane; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + if (link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING) { + hw_lane_settings[lane].VOLTAGE_SWING = + (enum dc_voltage_swing)(ln_adjust[lane].bits. + VOLTAGE_SWING_LANE); + hw_lane_settings[lane].PRE_EMPHASIS = + (enum dc_pre_emphasis)(ln_adjust[lane].bits. 
+ PRE_EMPHASIS_LANE); + } else if (link_dp_get_encoding_format(&lt_settings->link_settings) == + DP_128b_132b_ENCODING) { + hw_lane_settings[lane].FFE_PRESET.raw = + ln_adjust[lane].tx_ffe.PRESET_VALUE; + } + } + dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); + + if (lt_settings->disallow_per_lane_settings) { + /* we find the maximum of the requested settings across all lanes*/ + /* and set this maximum for all lanes*/ + maximize_lane_settings(lt_settings, hw_lane_settings); + override_lane_settings(lt_settings, hw_lane_settings); + + if (lt_settings->always_match_dpcd_with_hw_lane_settings) + dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); + } + +} + +void dp_decide_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings) +{ + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) + decide_8b_10b_training_settings(link, link_settings, lt_settings); + else if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) + decide_128b_132b_training_settings(link, link_settings, lt_settings); +} + + +enum dc_status configure_lttpr_mode_transparent(struct dc_link *link) +{ + uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + return core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); +} + +static enum dc_status configure_lttpr_mode_non_transparent( + struct dc_link *link, + const struct link_training_settings *lt_settings) +{ + /* aux timeout is already set to extended */ + /* RESET/SET lttpr mode to enable non transparent mode */ + uint8_t repeater_cnt; + uint32_t aux_interval_address; + uint8_t repeater_id; + enum dc_status result = DC_ERROR_UNEXPECTED; + uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT; + const struct dc *dc = link->dc; + + enum dp_link_encoding encoding = dc->link_srv->dp_get_encoding_format(&lt_settings->link_settings); + + if (encoding == DP_8b_10b_ENCODING) { + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Transparent Mode\n", __func__); + result = core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + } + + if (result == DC_OK) { + link->dpcd_caps.lttpr_caps.mode = repeater_mode; + } + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + + DC_LOG_HW_LINK_TRAINING("%s\n Set LTTPR to Non Transparent Mode\n", __func__); + + repeater_mode = DP_PHY_REPEATER_MODE_NON_TRANSPARENT; + result = core_link_write_dpcd(link, + DP_PHY_REPEATER_MODE, + (uint8_t *)&repeater_mode, + sizeof(repeater_mode)); + + if (result == DC_OK) { + link->dpcd_caps.lttpr_caps.mode = repeater_mode; + } + + if (encoding == DP_8b_10b_ENCODING) { + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + /* Driver does not need to train the first hop. Skip DPCD read and clear + * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
+ */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0; + + for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) { + aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1)); + core_link_read_dpcd( + link, + aux_interval_address, + (uint8_t *)&link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1], + sizeof(link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1])); + link->dpcd_caps.lttpr_caps.aux_rd_interval[repeater_id - 1] &= 0x7F; + } + } + } + + return result; +} + +enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_training_settings *lt_settings) +{ + enum dc_status status = DC_OK; + + if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) + status = configure_lttpr_mode_transparent(link); + + else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + status = configure_lttpr_mode_non_transparent(link, lt_settings); + + return status; +} + +void repeater_training_done(struct dc_link *link, uint32_t offset) +{ + union dpcd_training_pattern dpcd_pattern = {0}; + + const uint32_t dpcd_base_lt_offset = + DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + /* Set training not in progress*/ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = DPCD_TRAINING_PATTERN_VIDEOIDLE; + + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + &dpcd_pattern.raw, + 1); + + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Id: %d 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + +static void dpcd_exit_training_mode(struct dc_link *link, enum dp_link_encoding encoding) +{ + uint8_t sink_status = 0; + uint8_t i; + + /* clear training pattern set */ + dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); + + if (encoding == DP_128b_132b_ENCODING) { + /* poll for intra-hop disable */ + for (i = 0; i < 10; i++) { + if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && + (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0) + break; + fsleep(1000); + } + } +} + +enum dc_status dpcd_configure_channel_coding(struct dc_link *link, + struct link_training_settings *lt_settings) +{ + enum dp_link_encoding encoding = + link_dp_get_encoding_format( + &lt_settings->link_settings); + enum dc_status status; + + status = core_link_write_dpcd( + link, + DP_MAIN_LINK_CHANNEL_CODING_SET, + (uint8_t *) &encoding, + 1); + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X MAIN_LINK_CHANNEL_CODING_SET = %x\n", + __func__, + DP_MAIN_LINK_CHANNEL_CODING_SET, + encoding); + + return status; +} + +void dpcd_set_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern training_pattern) +{ + union dpcd_training_pattern dpcd_pattern = {0}; + + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = + dp_training_pattern_to_dpcd_training_pattern( + link, training_pattern); + + core_link_write_dpcd( + link, + DP_TRAINING_PATTERN_SET, + &dpcd_pattern.raw, + 1); + + DC_LOG_HW_LINK_TRAINING("%s\n %x pattern = %x\n", + __func__, + DP_TRAINING_PATTERN_SET, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); +} + +enum dc_status dpcd_set_link_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings) +{ + uint8_t rate; + enum dc_status status; + + union down_spread_ctrl downspread = {0}; + union lane_count_set lane_count_set = {0}; + + downspread.raw = (uint8_t) + (lt_settings->link_settings.link_spread); + +
lane_count_set.bits.LANE_COUNT_SET = + lt_settings->link_settings.lane_count; + + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + + if (link->ep_type == DISPLAY_ENDPOINT_PHY && + lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = + link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; + } + + status = core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &downspread.raw, sizeof(downspread)); + + status = core_link_write_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, 1); + + if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_13 && + lt_settings->link_settings.use_link_rate_set == true) { + rate = 0; + /* WA for some MUX chips that will power down with eDP and lose supported + * link rate set for eDP 1.4. Source reads DPCD 0x010 again to ensure + * MUX chip gets link rate set back before link training. + */ + if (link->connector_signal == SIGNAL_TYPE_EDP) { + uint8_t supported_link_rates[16]; + + core_link_read_dpcd(link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + } + status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + status = core_link_write_dpcd(link, DP_LINK_RATE_SET, + &lt_settings->link_settings.link_rate_set, 1); + } else { + rate = get_dpcd_link_rate(&lt_settings->link_settings); + + status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + } + + if (rate) { + DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_BW_SET, + lt_settings->link_settings.link_rate, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n %x rate set = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_RATE_SET, + lt_settings->link_settings.link_rate_set, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + } + + return status; +} + +enum dc_status dpcd_set_lane_settings( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + uint32_t offset) +{ + unsigned int lane0_set_address; + enum dc_status status; + lane0_set_address = DP_TRAINING_LANE0_SET; + + if (is_repeater(link_training_setting, offset)) + lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + status = core_link_write_dpcd(link, + lane0_set_address, + (uint8_t *)(link_training_setting->dpcd_lane_settings), + link_training_setting->link_settings.lane_count); + + if (is_repeater(link_training_setting, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + lane0_set_address, + link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + lane0_set_address, + link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, +
link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + } + + return status; +} + +void dpcd_set_lt_pattern_and_lane_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum dc_dp_training_pattern pattern, + uint32_t offset) +{ + uint32_t dpcd_base_lt_offset; + uint8_t dpcd_lt_buffer[5] = {0}; + union dpcd_training_pattern dpcd_pattern = {0}; + uint32_t size_in_bytes; + bool edp_workaround = false; /* TODO link_prop.INTERNAL */ + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET; + + if (is_repeater(lt_settings, offset)) + dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1)); + + /***************************************************************** + * DpcdAddress_TrainingPatternSet + *****************************************************************/ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = + dp_training_pattern_to_dpcd_training_pattern(link, pattern); + + dpcd_pattern.v1_4.SCRAMBLING_DISABLE = + dp_initialize_scrambling_data_symbols(link, pattern); + + dpcd_lt_buffer[DP_TRAINING_PATTERN_SET - DP_TRAINING_PATTERN_SET] + = dpcd_pattern.raw; + + if (is_repeater(lt_settings, offset)) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", + __func__, + dpcd_base_lt_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } + + /* concatenate everything into one buffer*/ + size_in_bytes = lt_settings->link_settings.lane_count * + sizeof(lt_settings->dpcd_lane_settings[0]); + + // 0x00103 - 0x00102 + memmove( + &dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET], + lt_settings->dpcd_lane_settings, + size_in_bytes); + + if (is_repeater(lt_settings, offset)) { + if (link_dp_get_encoding_format(&lt_settings->link_settings) == + DP_128b_132b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + else if (link_dp_get_encoding_format(&lt_settings->link_settings) == + DP_8b_10b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" + " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + offset, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + } else { + if (link_dp_get_encoding_format(&lt_settings->link_settings) == + DP_128b_132b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + else if (link_dp_get_encoding_format(&lt_settings->link_settings) == + DP_8b_10b_ENCODING) + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, +
lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + } + if (edp_workaround) { + /* for eDP write in 2 parts because the 5-byte burst is + * causing issues on some eDP panels (EPR#366724) + */ + core_link_write_dpcd( + link, + DP_TRAINING_PATTERN_SET, + &dpcd_pattern.raw, + sizeof(dpcd_pattern.raw)); + + core_link_write_dpcd( + link, + DP_TRAINING_LANE0_SET, + (uint8_t *)(lt_settings->dpcd_lane_settings), + size_in_bytes); + + } else if (link_dp_get_encoding_format(&lt_settings->link_settings) == + DP_128b_132b_ENCODING) { + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + dpcd_lt_buffer, + sizeof(dpcd_lt_buffer)); + } else + /* write it all in (1 + number-of-lanes)-byte burst*/ + core_link_write_dpcd( + link, + dpcd_base_lt_offset, + dpcd_lt_buffer, + size_in_bytes + sizeof(dpcd_pattern.raw)); +} + +void start_clock_recovery_pattern_early(struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset) +{ + DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n", + __func__); + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); + dp_set_hw_lane_settings(link, link_res, lt_settings, offset); + udelay(400); +} + +void dp_set_hw_test_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dp_test_pattern test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + const struct link_hwss *link_hwss = get_link_hwss(link, link_res); + struct encoder_set_dp_phy_pattern_param pattern_param = {0}; + + pattern_param.dp_phy_pattern = test_pattern; + pattern_param.custom_pattern = custom_pattern; + pattern_param.custom_pattern_size = custom_pattern_size; + pattern_param.dp_panel_mode = dp_get_panel_mode(link); + + if (link_hwss->ext.set_dp_link_test_pattern) + link_hwss->ext.set_dp_link_test_pattern(link, link_res, &pattern_param); +} + +bool dp_set_hw_training_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dc_dp_training_pattern pattern, + uint32_t offset) +{ + enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED; + + switch (pattern) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_2: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN2; + break; + case DP_TRAINING_PATTERN_SEQUENCE_3: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN3; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; + break; + case DP_128b_132b_TPS1: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; + break; + case DP_128b_132b_TPS2: + test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; + break; + default: + break; + } + + dp_set_hw_test_pattern(link, link_res, test_pattern, NULL, 0); + + return true; +} + +static bool perform_post_lt_adj_req_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum dc_lane_count lane_count = + lt_settings->link_settings.lane_count; + + uint32_t adj_req_count; + uint32_t adj_req_timer; + bool req_drv_setting_changed; + uint32_t lane; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + req_drv_setting_changed = false; + for (adj_req_count = 0; adj_req_count < POST_LT_ADJ_REQ_LIMIT; + adj_req_count++) { + + req_drv_setting_changed =
false; + + for (adj_req_timer = 0; + adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT; + adj_req_timer++) { + + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + DPRX); + + if (dpcd_lane_status_updated.bits. + POST_LT_ADJ_REQ_IN_PROGRESS == 0) + return true; + + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) + return false; + + if (!dp_is_ch_eq_done(lane_count, dpcd_lane_status) || + !dp_is_symbol_locked(lane_count, dpcd_lane_status) || + !dp_is_interlane_aligned(dpcd_lane_status_updated)) + return false; + + for (lane = 0; lane < (uint32_t)(lane_count); lane++) { + + if (lt_settings-> + dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET != + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE || + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET != + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) { + + req_drv_setting_changed = true; + break; + } + } + + if (req_drv_setting_changed) { + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + + dp_set_drive_settings(link, + link_res, + lt_settings); + break; + } + + msleep(1); + } + + if (!req_drv_setting_changed) { + DC_LOG_WARNING("%s: Post Link Training Adjust Request Timed out\n", + __func__); + + ASSERT(0); + return true; + } + } + DC_LOG_WARNING("%s: Post Link Training Adjust Request limit reached\n", + __func__); + + ASSERT(0); + return true; + +} + +static enum link_training_result dp_transition_to_video_idle( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + enum link_training_result status) +{ + union lane_count_set lane_count_set = {0}; + + /* 4. mainlink output idle pattern*/ + dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + + /* + * 5. post training adjust if required + * If the upstream DPTX and downstream DPRX both support TPS4, + * TPS4 must be used instead of POST_LT_ADJ_REQ. + */ + if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || + lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { + /* delay 5ms after Main Link output idle pattern and then check + * DPCD 0202h. 
+ */ + if (link->connector_signal != SIGNAL_TYPE_EDP && status == LINK_TRAINING_SUCCESS) { + msleep(5); + status = dp_check_link_loss_status(link, lt_settings); + } + return status; + } + + if (status == LINK_TRAINING_SUCCESS && + perform_post_lt_adj_req_sequence(link, link_res, lt_settings) == false) + status = LINK_TRAINING_LQA_FAIL; + + lane_count_set.bits.LANE_COUNT_SET = lt_settings->link_settings.lane_count; + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + core_link_write_dpcd( + link, + DP_LANE_COUNT_SET, + &lane_count_set.raw, + sizeof(lane_count_set)); + + return status; +} + +enum link_training_result dp_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + bool skip_video_pattern) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + struct link_training_settings lt_settings = {0}; + enum dp_link_encoding encoding = + link_dp_get_encoding_format(link_settings); + + /* decide training settings */ + dp_decide_training_settings( + link, + link_settings, + &lt_settings); + + override_training_settings( + link, + &link->preferred_training_settings, + &lt_settings); + + /* reset previous training states */ + dpcd_exit_training_mode(link, encoding); + + /* configure link prior to entering training mode */ + dpcd_configure_lttpr_mode(link, &lt_settings); + dp_set_fec_ready(link, link_res, lt_settings.should_set_fec_ready); + dpcd_configure_channel_coding(link, &lt_settings); + + /* enter training mode: + * Per DP specs starting from here, DPTX device shall not issue + * Non-LT AUX transactions inside training mode. + */ + if ((link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && encoding == DP_8b_10b_ENCODING) + if (link->dc->config.use_old_fixed_vs_sequence) + status = dp_perform_fixed_vs_pe_training_sequence_legacy(link, link_res, &lt_settings); + else + status = dp_perform_fixed_vs_pe_training_sequence(link, link_res, &lt_settings); + else if (encoding == DP_8b_10b_ENCODING) + status = dp_perform_8b_10b_link_training(link, link_res, &lt_settings); + else if (encoding == DP_128b_132b_ENCODING) + status = dp_perform_128b_132b_link_training(link, link_res, &lt_settings); + else + ASSERT(0); + + /* exit training mode */ + dpcd_exit_training_mode(link, encoding); + + /* switch to video idle */ + if ((status == LINK_TRAINING_SUCCESS) || !skip_video_pattern) + status = dp_transition_to_video_idle(link, + link_res, + &lt_settings, + status); + + /* dump debug data */ + dp_log_training_result(link, &lt_settings, status); + if (status != LINK_TRAINING_SUCCESS) + link->ctx->dc->debug_data.ltFailCount++; + return status; +} + +bool perform_link_training_with_retries( + const struct dc_link_settings *link_setting, + bool skip_video_pattern, + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal, + bool do_fallback) +{ + int j; + uint8_t delay_between_attempts = LINK_TRAINING_RETRY_DELAY; + struct dc_stream_state *stream = pipe_ctx->stream; + struct dc_link *link = stream->link; + enum dp_panel_mode panel_mode = dp_get_panel_mode(link); + enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; + struct dc_link_settings cur_link_settings = *link_setting; + struct dc_link_settings max_link_settings = *link_setting; + const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + int fail_count = 0; + bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ + bool is_link_bw_min = /* RBR x 1
*/ + (cur_link_settings.link_rate <= LINK_RATE_LOW) && + (cur_link_settings.lane_count <= LANE_COUNT_ONE); + + dp_trace_commit_lt_init(link); + + + if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) + /* We need to do this before the link training to ensure the idle + * pattern in SST mode will be sent right after the link training + */ + link_hwss->setup_stream_encoder(pipe_ctx); + + dp_trace_set_lt_start_timestamp(link, false); + j = 0; + while (j < attempts && fail_count < (attempts * 10)) { + + DC_LOG_HW_LINK_TRAINING("%s: Beginning link(%d) training attempt %u of %d @ rate(%d) x lane(%d) @ spread = %x\n", + __func__, link->link_index, (unsigned int)j + 1, attempts, + cur_link_settings.link_rate, cur_link_settings.lane_count, + cur_link_settings.link_spread); + + dp_enable_link_phy( + link, + &pipe_ctx->link_res, + signal, + pipe_ctx->clock_source->id, + &cur_link_settings); + + if (stream->sink_patches.dppowerup_delay > 0) { + int delay_dp_power_up_in_ms = stream->sink_patches.dppowerup_delay; + + msleep(delay_dp_power_up_in_ms); + } + + if (panel_mode == DP_PANEL_MODE_EDP) { + struct cp_psp *cp_psp = &stream->ctx->cp_psp; + + if (cp_psp && cp_psp->funcs.enable_assr) { + /* ASSR is bound to fail with unsigned PSP + * verstage used during devlopment phase. + * Report and continue with eDP panel mode to + * perform eDP link training with right settings + */ + bool result; + result = cp_psp->funcs.enable_assr(cp_psp->handle, link); + if (!result && link->panel_mode != DP_PANEL_MODE_EDP) + panel_mode = DP_PANEL_MODE_DEFAULT; + } + } + + dp_set_panel_mode(link, panel_mode); + + if (link->aux_access_disabled) { + dp_perform_link_training_skip_aux(link, &pipe_ctx->link_res, &cur_link_settings); + return true; + } else { + /** @todo Consolidate USB4 DP and DPx.x training. */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + status = dpia_perform_link_training( + link, + &pipe_ctx->link_res, + &cur_link_settings, + skip_video_pattern); + + /* Transmit idle pattern once training successful. */ + if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) { + dp_set_hw_test_pattern(link, &pipe_ctx->link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + // Update verified link settings to current one + // Because DPIA LT might fallback to lower link setting. 
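+ // MST bookkeeping below is refreshed from the settings the DPIA link actually trained at, since fallback may have lowered them.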
+ if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + link->verified_link_cap.link_rate = link->cur_link_settings.link_rate; + link->verified_link_cap.lane_count = link->cur_link_settings.lane_count; + dm_helpers_dp_mst_update_branch_bandwidth(link->ctx, link); + } + } + } else { + status = dp_perform_link_training( + link, + &pipe_ctx->link_res, + &cur_link_settings, + skip_video_pattern); + } + + dp_trace_lt_total_count_increment(link, false); + dp_trace_lt_result_update(link, status, false); + dp_trace_set_lt_end_timestamp(link, false); + if (status == LINK_TRAINING_SUCCESS && !is_link_bw_low) + return true; + } + + fail_count++; + dp_trace_lt_fail_count_update(link, fail_count, false); + if (link->ep_type == DISPLAY_ENDPOINT_PHY) { + /* latest link training still fail or link training is aborted + * skip delay and keep PHY on + */ + if (j == (attempts - 1) || (status == LINK_TRAINING_ABORT)) + break; + } + + if (j == (attempts - 1)) { + DC_LOG_WARNING( + "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", + __func__, link->link_index, (unsigned int)j + 1, attempts, + cur_link_settings.link_rate, cur_link_settings.lane_count, + cur_link_settings.link_spread, status); + } else { + DC_LOG_HW_LINK_TRAINING( + "%s: Link(%d) training attempt %u of %d failed @ rate(%d) x lane(%d) @ spread = %x : fail reason:(%d)\n", + __func__, link->link_index, (unsigned int)j + 1, attempts, + cur_link_settings.link_rate, cur_link_settings.lane_count, + cur_link_settings.link_spread, status); + } + + dp_disable_link_phy(link, &pipe_ctx->link_res, signal); + + /* Abort link training if failure due to sink being unplugged. */ + if (status == LINK_TRAINING_ABORT) { + enum dc_connection_type type = dc_connection_none; + + link_detect_connection_type(link, &type); + if (type == dc_connection_none) { + DC_LOG_HW_LINK_TRAINING("%s: Aborting training because sink unplugged\n", __func__); + break; + } + } + + /* Try to train again at original settings if: + * - not falling back between training attempts; + * - aborted previous attempt due to reasons other than sink unplug; + * - successfully trained but at a link rate lower than that required by stream; + * - reached minimum link bandwidth. + */ + if (!do_fallback || (status == LINK_TRAINING_ABORT) || + (status == LINK_TRAINING_SUCCESS && is_link_bw_low) || + is_link_bw_min) { + j++; + cur_link_settings = *link_setting; + delay_between_attempts += LINK_TRAINING_RETRY_DELAY; + is_link_bw_low = false; + is_link_bw_min = (cur_link_settings.link_rate <= LINK_RATE_LOW) && + (cur_link_settings.lane_count <= LANE_COUNT_ONE); + + } else if (do_fallback) { /* Try training at lower link bandwidth if doing fallback. */ + uint32_t req_bw; + uint32_t link_bw; + enum dc_link_encoding_format link_encoding = DC_LINK_ENCODING_UNSPECIFIED; + + decide_fallback_link_setting(link, &max_link_settings, + &cur_link_settings, status); + + if (link_dp_get_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) + link_encoding = DC_LINK_ENCODING_DP_8b_10b; + else if (link_dp_get_encoding_format(&cur_link_settings) == DP_128b_132b_ENCODING) + link_encoding = DC_LINK_ENCODING_DP_128b_132b; + + /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to + * minimum link bandwidth. 
+ */ + req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing, link_encoding); + link_bw = dp_link_bandwidth_kbps(link, &cur_link_settings); + is_link_bw_low = (req_bw > link_bw); + is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && + (cur_link_settings.lane_count <= LANE_COUNT_ONE)); + + if (is_link_bw_low) + DC_LOG_WARNING( + "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", + __func__, link->link_index, req_bw, link_bw); + } + + msleep(delay_between_attempts); + } + + return false; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h new file mode 100644 index 0000000000..851bd17317 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.h @@ -0,0 +1,185 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_H__ +#define __DC_LINK_DP_TRAINING_H__ +#include "link.h" + +bool perform_link_training_with_retries( + const struct dc_link_settings *link_setting, + bool skip_video_pattern, + int attempts, + struct pipe_ctx *pipe_ctx, + enum signal_type signal, + bool do_fallback); + +enum link_training_result dp_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_settings, + bool skip_video_pattern); + +bool dp_set_hw_training_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dc_dp_training_pattern pattern, + uint32_t offset); + +void dp_set_hw_test_pattern( + struct dc_link *link, + const struct link_resource *link_res, + enum dp_test_pattern test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size); + +void dpcd_set_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern training_pattern); + +/* Write DPCD drive settings. */ +enum dc_status dpcd_set_lane_settings( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + uint32_t offset); + +/* Write DPCD link configuration data. 
*/ +enum dc_status dpcd_set_link_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings); + +void dpcd_set_lt_pattern_and_lane_settings( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum dc_dp_training_pattern pattern, + uint32_t offset); + +/* Read training status and adjustment requests from DPCD. */ +enum dc_status dp_get_lane_status_and_lane_adjust( + struct dc_link *link, + const struct link_training_settings *link_training_setting, + union lane_status ln_status[LANE_COUNT_DP_MAX], + union lane_align_status_updated *ln_align, + union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + uint32_t offset); + +enum dc_status dpcd_configure_lttpr_mode( + struct dc_link *link, + struct link_training_settings *lt_settings); + +enum dc_status configure_lttpr_mode_transparent(struct dc_link *link); + +enum dc_status dpcd_configure_channel_coding( + struct dc_link *link, + struct link_training_settings *lt_settings); + +void repeater_training_done(struct dc_link *link, uint32_t offset); + +void start_clock_recovery_pattern_early(struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset); + +void dp_decide_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings); + +void dp_decide_lane_settings( + const struct link_training_settings *lt_settings, + const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX], + struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane *dpcd_lane_settings); + +enum dc_dp_training_pattern decide_cr_training_pattern( + const struct dc_link_settings *link_settings); + +enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link, + const struct dc_link_settings *link_settings); + +enum lttpr_mode dp_decide_lttpr_mode(struct dc_link *link, + struct dc_link_settings *link_setting); + +void dp_get_lttpr_mode_override(struct dc_link *link, + enum lttpr_mode *override); + +void override_training_settings( + struct dc_link *link, + const struct dc_link_training_overrides *overrides, + struct link_training_settings *lt_settings); + +/* Check DPCD training status registers to detect link loss. 
*/ +enum link_training_result dp_check_link_loss_status( + struct dc_link *link, + const struct link_training_settings *link_training_setting); + +bool dp_is_cr_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); + +bool dp_is_ch_eq_done(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); +bool dp_is_symbol_locked(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); +bool dp_is_interlane_aligned(union lane_align_status_updated align_status); + +bool is_repeater(const struct link_training_settings *lt_settings, uint32_t offset); + +bool dp_is_max_vs_reached( + const struct link_training_settings *lt_settings); + +uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings); + +enum link_training_result dp_get_cr_failure(enum dc_lane_count ln_count, + union lane_status *dpcd_lane_status); + +void dp_hw_to_dpcd_lane_settings( + const struct link_training_settings *lt_settings, + const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX], + union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]); + +void dp_wait_for_training_aux_rd_interval( + struct dc_link *link, + uint32_t wait_in_micro_secs); + +enum dpcd_training_patterns + dp_training_pattern_to_dpcd_training_pattern( + struct dc_link *link, + enum dc_dp_training_pattern pattern); + +uint8_t dp_initialize_scrambling_data_symbols( + struct dc_link *link, + enum dc_dp_training_pattern pattern); + +void dp_log_training_result( + struct dc_link *link, + const struct link_training_settings *lt_settings, + enum link_training_result status); + +uint32_t dp_translate_training_aux_read_interval( + uint32_t dpcd_aux_read_interval); + +uint8_t dp_get_nibble_at_index(const uint8_t *buf, + uint32_t index); +#endif /* __DC_LINK_DP_TRAINING_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c new file mode 100644 index 0000000000..db87cfe37b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.c @@ -0,0 +1,265 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp 128b/132b link training software policies and + * sequences. 
+ */ +#include "link_dp_training_128b_132b.h" +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" + +#define DC_LOGGER \ + link->ctx->logger + +static enum dc_status dpcd_128b_132b_set_lane_settings( + struct dc_link *link, + const struct link_training_settings *link_training_setting) +{ + enum dc_status status = core_link_write_dpcd(link, + DP_TRAINING_LANE0_SET, + (uint8_t *)(link_training_setting->dpcd_lane_settings), + sizeof(link_training_setting->dpcd_lane_settings)); + + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", + __func__, + DP_TRAINING_LANE0_SET, + link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); + return status; +} + +static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, + uint32_t *interval_in_us) +{ + union dp_128b_132b_training_aux_rd_interval dpcd_interval; + uint32_t interval_unit = 0; + + dpcd_interval.raw = 0; + core_link_read_dpcd(link, DP_128B132B_TRAINING_AUX_RD_INTERVAL, + &dpcd_interval.raw, sizeof(dpcd_interval.raw)); + interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */ + /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) * + * INTERVAL_UNIT. The maximum is 256 ms + */ + *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000; +} + +static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + uint8_t loop_count; + uint32_t aux_rd_interval = 0; + uint32_t wait_time = 0; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + enum dc_status status = DC_OK; + enum link_training_result result = LINK_TRAINING_SUCCESS; + + /* Transmit 128b/132b_TPS1 over Main-Link */ + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, DPRX); + + /* Set TRAINING_PATTERN_SET to 01h */ + dpcd_set_training_pattern(link, lt_settings->pattern_for_cr); + + /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */ + dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_eq, DPRX); + + /* Set loop counter to start from 1 */ + loop_count = 1; + + /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */ + dpcd_set_lt_pattern_and_lane_settings(link, lt_settings, + lt_settings->pattern_for_eq, DPRX); + + /* poll for channel EQ done */ + while (result == LINK_TRAINING_SUCCESS) { + dp_wait_for_training_aux_rd_interval(link, aux_rd_interval); + wait_time += aux_rd_interval; + status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + } else if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count, + 
dpcd_lane_status)) { + /* pass */ + break; + } else if (loop_count >= lt_settings->eq_loop_count_limit) { + result = DP_128b_132b_MAX_LOOP_COUNT_REACHED; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + result = DP_128b_132b_LT_FAILED; + } else { + dp_set_hw_lane_settings(link, link_res, lt_settings, DPRX); + dpcd_128b_132b_set_lane_settings(link, lt_settings); + } + loop_count++; + } + + /* poll for EQ interlane align done */ + while (result == LINK_TRAINING_SUCCESS) { + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + } else if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) { + /* pass */ + break; + } else if (wait_time >= lt_settings->eq_wait_time_limit) { + result = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + result = DP_128b_132b_LT_FAILED; + } else { + dp_wait_for_training_aux_rd_interval(link, + lt_settings->eq_pattern_time); + wait_time += lt_settings->eq_pattern_time; + status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + } + } + + return result; +} + +static enum link_training_result dp_perform_128b_132b_cds_done_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + /* Assumption: assume hardware has transmitted eq pattern */ + enum dc_status status = DC_OK; + enum link_training_result result = LINK_TRAINING_SUCCESS; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + uint32_t wait_time = 0; + + /* initiate CDS done sequence */ + dpcd_set_training_pattern(link, lt_settings->pattern_for_cds); + + /* poll for CDS interlane align done and symbol lock */ + while (result == LINK_TRAINING_SUCCESS) { + dp_wait_for_training_aux_rd_interval(link, + lt_settings->cds_pattern_time); + wait_time += lt_settings->cds_pattern_time; + status = dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status, + &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + } else if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) && + dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) { + /* pass */ + break; + } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) { + result = DP_128b_132b_LT_FAILED; + } else if (wait_time >= lt_settings->cds_wait_time_limit) { + result = DP_128b_132b_CDS_DONE_TIMEOUT; + } + } + + return result; +} + +enum link_training_result dp_perform_128b_132b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result result = LINK_TRAINING_SUCCESS; + + /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */ + if (link->dc->debug.legacy_dp2_lt) { + struct link_training_settings legacy_settings; + + decide_8b_10b_training_settings(link, + &lt_settings->link_settings, + &legacy_settings); + return dp_perform_8b_10b_link_training(link, link_res, &legacy_settings); + } + + dpcd_set_link_settings(link, lt_settings); + + if (result == LINK_TRAINING_SUCCESS) { + result = dp_perform_128b_132b_channel_eq_done_sequence(link, link_res, lt_settings); + if (result == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); + } + + if (result == LINK_TRAINING_SUCCESS) { +
result = dp_perform_128b_132b_cds_done_sequence(link, link_res, lt_settings); + if (result == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: CDS done.\n", __func__); + } + + return result; +} + +void decide_128b_132b_training_settings(struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings) +{ + memset(lt_settings, 0, sizeof(*lt_settings)); + + lt_settings->link_settings = *link_settings; + /* TODO: should decide link spread when populating link_settings */ + lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED : + LINK_SPREAD_05_DOWNSPREAD_30KHZ; + + lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings); + lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings); + lt_settings->eq_pattern_time = 2500; + lt_settings->eq_wait_time_limit = 400000; + lt_settings->eq_loop_count_limit = 20; + lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS; + lt_settings->cds_pattern_time = 2500; + lt_settings->cds_wait_time_limit = (dp_parse_lttpr_repeater_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000; + lt_settings->disallow_per_lane_settings = true; + lt_settings->lttpr_mode = dp_decide_128b_132b_lttpr_mode(link); + dp_hw_to_dpcd_lane_settings(lt_settings, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +} + +enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link) +{ + enum lttpr_mode mode = LTTPR_MODE_NON_LTTPR; + + if (dp_is_lttpr_present(link)) + mode = LTTPR_MODE_NON_TRANSPARENT; + + DC_LOG_DC("128b_132b chose LTTPR_MODE %d.\n", mode); + return mode; +} + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h new file mode 100644 index 0000000000..2147f24efc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_128b_132b.h @@ -0,0 +1,42 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_128B_132B_H__ +#define __DC_LINK_DP_TRAINING_128B_132B_H__ +#include "link_dp_training.h" + +enum link_training_result dp_perform_128b_132b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +void decide_128b_132b_training_settings(struct dc_link *link, + const struct dc_link_settings *link_settings, + struct link_training_settings *lt_settings); + +enum lttpr_mode dp_decide_128b_132b_lttpr_mode(struct dc_link *link); + +#endif /* __DC_LINK_DP_TRAINING_128B_132B_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c new file mode 100644 index 0000000000..2b4c15b0b4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.c @@ -0,0 +1,420 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements dp 8b/10b link training software policies and + * sequences. 
+ */ +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" + +#define DC_LOGGER \ + link->ctx->logger + +static int32_t get_cr_training_aux_rd_interval(struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + union training_aux_rd_interval training_rd_interval; + uint32_t wait_in_micro_secs = 100; + + memset(&training_rd_interval, 0, sizeof(training_rd_interval)); + if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && + link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) + wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; + } + return wait_in_micro_secs; +} + +static uint32_t get_eq_training_aux_rd_interval( + struct dc_link *link, + const struct dc_link_settings *link_settings) +{ + union training_aux_rd_interval training_rd_interval; + + memset(&training_rd_interval, 0, sizeof(training_rd_interval)); + if (link_dp_get_encoding_format(link_settings) == DP_128b_132b_ENCODING) { + core_link_read_dpcd( + link, + DP_128B132B_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + } else if (link_dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING && + link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { + core_link_read_dpcd( + link, + DP_TRAINING_AUX_RD_INTERVAL, + (uint8_t *)&training_rd_interval, + sizeof(training_rd_interval)); + } + + switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) { + case 0: return 400; + case 1: return 4000; + case 2: return 8000; + case 3: return 12000; + case 4: return 16000; + case 5: return 32000; + case 6: return 64000; + default: return 400; + } +} + +void decide_8b_10b_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_setting, + struct link_training_settings *lt_settings) +{ + memset(lt_settings, '\0', sizeof(struct link_training_settings)); + + /* Initialize link settings */ + lt_settings->link_settings.use_link_rate_set = link_setting->use_link_rate_set; + lt_settings->link_settings.link_rate_set = link_setting->link_rate_set; + lt_settings->link_settings.link_rate = link_setting->link_rate; + lt_settings->link_settings.lane_count = link_setting->lane_count; + /* TODO hard coded to SS for now + * lt_settings.link_settings.link_spread = + * dal_display_path_is_ss_supported( + * path_mode->display_path) ? + * LINK_SPREAD_05_DOWNSPREAD_30KHZ : + * LINK_SPREAD_DISABLED; + */ + lt_settings->link_settings.link_spread = link->dp_ss_off ? 
+ LINK_SPREAD_DISABLED : LINK_SPREAD_05_DOWNSPREAD_30KHZ; + lt_settings->cr_pattern_time = get_cr_training_aux_rd_interval(link, link_setting); + lt_settings->eq_pattern_time = get_eq_training_aux_rd_interval(link, link_setting); + lt_settings->pattern_for_cr = decide_cr_training_pattern(link_setting); + lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting); + lt_settings->enhanced_framing = 1; + lt_settings->should_set_fec_ready = true; + lt_settings->disallow_per_lane_settings = true; + lt_settings->always_match_dpcd_with_hw_lane_settings = true; + lt_settings->lttpr_mode = dp_decide_8b_10b_lttpr_mode(link); + dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); +} + +enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link) +{ + bool is_lttpr_present = dp_is_lttpr_present(link); + bool vbios_lttpr_force_non_transparent = link->dc->caps.vbios_lttpr_enable; + bool vbios_lttpr_aware = link->dc->caps.vbios_lttpr_aware; + + if (!is_lttpr_present) + return LTTPR_MODE_NON_LTTPR; + + if (vbios_lttpr_aware) { + if (vbios_lttpr_force_non_transparent) { + DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT due to VBIOS DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); + return LTTPR_MODE_NON_TRANSPARENT; + } else { + DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default due to VBIOS not set DCE_INFO_CAPS_LTTPR_SUPPORT_ENABLE set to 1.\n"); + return LTTPR_MODE_TRANSPARENT; + } + } + + if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A && + link->dc->caps.extended_aux_timeout_support) { + DC_LOG_DC("chose LTTPR_MODE_NON_TRANSPARENT by default and dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A set to 1.\n"); + return LTTPR_MODE_NON_TRANSPARENT; + } + + DC_LOG_DC("chose LTTPR_MODE_NON_LTTPR.\n"); + return LTTPR_MODE_NON_LTTPR; +} + +enum link_training_result perform_8b_10b_clock_recovery_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset) +{ + uint32_t retries_cr; + uint32_t retry_count; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + retries_cr = 0; + retry_count = 0; + + memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); + memset(&dpcd_lane_status_updated, '\0', + sizeof(dpcd_lane_status_updated)); + + if (!link->ctx->dc->work_arounds.lt_early_cr_pattern) + dp_set_hw_training_pattern(link, link_res, lt_settings->pattern_for_cr, offset); + + /* najeeb - The synaptics MST hub can put the LT in + * infinite loop by switching the VS + */ + /* between level 0 and level 1 continuously, here + * we try for CR lock for LinkTrainingMaxCRRetry count*/ + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + + /* 1. call HWSS to set lane settings*/ + dp_set_hw_lane_settings( + link, + link_res, + lt_settings, + offset); + + /* 2. update DPCD of the receiver*/ + if (!retry_count) + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration.*/ + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + lt_settings->pattern_for_cr, + offset); + else + dpcd_set_lane_settings( + link, + lt_settings, + offset); + + /* 3. 
wait receiver to lock-on*/ + wait_time_microsec = lt_settings->cr_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested drive + * settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + offset); + + /* 5. check CR done*/ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) { + DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__); + return LINK_TRAINING_SUCCESS; + } + + /* 6. max VS reached*/ + if ((link_dp_get_encoding_format(&lt_settings->link_settings) == + DP_8b_10b_ENCODING) && + dp_is_max_vs_reached(lt_settings)) + break; + + /* 7. same lane settings*/ + /* Note: settings are the same for all lanes, + * so comparing first lane is sufficient*/ + if ((link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) && + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + retries_cr++; + else if ((link_dp_get_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) && + lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE == + dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE) + retries_cr++; + else + retries_cr = 0; + + /* 8. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + retry_count++; + } + + if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { + ASSERT(0); + DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. Possibly voltage swing issue", + __func__, + LINK_TRAINING_MAX_CR_RETRY); + + } + + return dp_get_cr_failure(lane_count, dpcd_lane_status); +} + +enum link_training_result perform_8b_10b_channel_equalization_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset) +{ + enum dc_dp_training_pattern tr_pattern; + uint32_t retries_ch_eq; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + /* Note: also check that TPS4 is a supported feature*/ + tr_pattern = lt_settings->pattern_for_eq; + + if (is_repeater(lt_settings, offset) && link_dp_get_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) + tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + + dp_set_hw_training_pattern(link, link_res, tr_pattern, offset); + + for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; + retries_ch_eq++) { + + dp_set_hw_lane_settings(link, link_res, lt_settings, offset); + + /* 2. update DPCD*/ + if (!retries_ch_eq) + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration + */ + + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + tr_pattern, offset); + else + dpcd_set_lane_settings(link, lt_settings, offset); + + /* 3. wait for receiver to lock-on*/ + wait_time_microsec = lt_settings->eq_pattern_time; + + if (is_repeater(lt_settings, offset)) + wait_time_microsec = + dp_translate_training_aux_read_interval( + link->dpcd_caps.lttpr_caps.aux_rd_interval[offset - 1]); + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4.
Read lane status and requested + * drive settings as set by the sink*/ + + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + offset); + + /* 5. check CR done*/ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) + return dpcd_lane_status[0].bits.CR_DONE_0 ? + LINK_TRAINING_EQ_FAIL_CR_PARTIAL : + LINK_TRAINING_EQ_FAIL_CR; + + /* 6. check CHEQ done*/ + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) + return LINK_TRAINING_SUCCESS; + + /* 7. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + + return LINK_TRAINING_EQ_FAIL_EQ; + +} + +enum link_training_result dp_perform_8b_10b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + + uint8_t repeater_cnt; + uint8_t repeater_id; + uint8_t lane = 0; + + if (link->ctx->dc->work_arounds.lt_early_cr_pattern) + start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); + + /* 1. set link rate, lane count and spread. */ + dpcd_set_link_settings(link, lt_settings); + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + + /* 2. perform link training (set link training done + * to false is done as well) + */ + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); + repeater_id--) { + status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) { + repeater_training_done(link, repeater_id); + break; + } + + status = perform_8b_10b_channel_equalization_sequence(link, + link_res, + lt_settings, + repeater_id); + if (status == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); + + repeater_training_done(link, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) + break; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->dpcd_lane_settings[lane].raw = 0; + lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; + lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; + } + } + } + + if (status == LINK_TRAINING_SUCCESS) { + status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX); + if (status == LINK_TRAINING_SUCCESS) { + status = perform_8b_10b_channel_equalization_sequence(link, + link_res, + lt_settings, + DPRX); + if (status == LINK_TRAINING_SUCCESS) + DC_LOG_HW_LINK_TRAINING("%s: Channel EQ done.\n", __func__); + } + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h new file mode 100644 index 0000000000..d26de15ce9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_8b_10b.h @@ -0,0 +1,61 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_8B_10B_H__ +#define __DC_LINK_DP_TRAINING_8B_10B_H__ +#include "link_dp_training.h" + +/* to avoid infinite loop where-in the receiver + * switches between different VS + */ +#define LINK_TRAINING_MAX_CR_RETRY 100 +#define LINK_TRAINING_MAX_RETRY_COUNT 5 + +enum link_training_result dp_perform_8b_10b_link_training( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +enum link_training_result perform_8b_10b_clock_recovery_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset); + +enum link_training_result perform_8b_10b_channel_equalization_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t offset); + +enum lttpr_mode dp_decide_8b_10b_lttpr_mode(struct dc_link *link); + +void decide_8b_10b_training_settings( + struct dc_link *link, + const struct dc_link_settings *link_setting, + struct link_training_settings *lt_settings); + +#endif /* __DC_LINK_DP_TRAINING_8B_10B_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c new file mode 100644 index 0000000000..4c6b886a9d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.c @@ -0,0 +1,79 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * + */ +#include "link_dp_training_auxless.h" +#include "link_dp_phy.h" +#define DC_LOGGER \ + link->ctx->logger +bool dp_perform_link_training_skip_aux( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_setting) +{ + struct link_training_settings lt_settings = {0}; + + dp_decide_training_settings( + link, + link_setting, + &lt_settings); + override_training_settings( + link, + &link->preferred_training_settings, + &lt_settings); + + /* 1. Perform_clock_recovery_sequence. */ + + /* transmit training pattern for clock recovery */ + dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_cr, DPRX); + + /* call HWSS to set lane settings*/ + dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX); + + /* wait for receiver to lock-on*/ + dp_wait_for_training_aux_rd_interval(link, lt_settings.cr_pattern_time); + + /* 2. Perform_channel_equalization_sequence. */ + + /* transmit training pattern for channel equalization. */ + dp_set_hw_training_pattern(link, link_res, lt_settings.pattern_for_eq, DPRX); + + /* call HWSS to set lane settings*/ + dp_set_hw_lane_settings(link, link_res, &lt_settings, DPRX); + + /* wait for receiver to lock-on. */ + dp_wait_for_training_aux_rd_interval(link, lt_settings.eq_pattern_time); + + /* 3. Perform_link_training_int. */ + + /* Mainlink output idle pattern. */ + dp_set_hw_test_pattern(link, link_res, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0); + + dp_log_training_result(link, &lt_settings, LINK_TRAINING_SUCCESS); + + return true; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h new file mode 100644 index 0000000000..546387a5f3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_auxless.h @@ -0,0 +1,35 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_AUXLESS_H__ +#define __DC_LINK_DP_TRAINING_AUXLESS_H__ +#include "link_dp_training.h" + +bool dp_perform_link_training_skip_aux( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_setting); +#endif /* __DC_LINK_DP_TRAINING_AUXLESS_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c new file mode 100644 index 0000000000..4f4e899e5c --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c @@ -0,0 +1,1048 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This module implements functionality for training DPIA links. + */ +#include "link_dp_training_dpia.h" +#include "dc.h" +#include "inc/core_status.h" +#include "dpcd_defs.h" + +#include "link_dp_dpia.h" +#include "link_hwss.h" +#include "dm_helpers.h" +#include "dmub/inc/dmub_cmd.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_training_8b_10b.h" +#include "link_dp_capability.h" +#include "dc_dmub_srv.h" +#define DC_LOGGER \ + link->ctx->logger + +/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */ +#define DPIA_CLK_SYNC_DELAY 16000 + +/* Extend interval between training status checks for manual testing. */ +#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000 + +#define TRAINING_AUX_RD_INTERVAL 100 //us + +/* SET_CONFIG message types sent by driver. */ +enum dpia_set_config_type { + DPIA_SET_CFG_SET_LINK = 0x01, + DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05, + DPIA_SET_CFG_SET_TRAINING = 0x18, + DPIA_SET_CFG_SET_VSPE = 0x19 +}; + +/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */ +enum dpia_set_config_ts { + DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */ + DPIA_TS_TPS1 = 0x01, + DPIA_TS_TPS2 = 0x02, + DPIA_TS_TPS3 = 0x03, + DPIA_TS_TPS4 = 0x07, + DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */ +}; + +/* SET_CONFIG message data associated with messages sent by driver. 
*/ +union dpia_set_config_data { + struct { + uint8_t mode : 1; + uint8_t reserved : 7; + } set_link; + struct { + uint8_t stage; + } set_training; + struct { + uint8_t swing : 2; + uint8_t max_swing_reached : 1; + uint8_t pre_emph : 2; + uint8_t max_pre_emph_reached : 1; + uint8_t reserved : 2; + } set_vspe; + uint8_t raw; +}; + + +/* Configure link as prescribed in link_setting; set LTTPR mode; and + * Initialize link training settings. + * Abort link training if sink unplug detected. + * + * @param link DPIA link being trained. + * @param[in] link_setting Lane count, link rate and downspread control. + * @param[out] lt_settings Link settings and drive settings (voltage swing and pre-emphasis). + */ +static enum link_training_result dpia_configure_link( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_setting, + struct link_training_settings *lt_settings) +{ + enum dc_status status; + bool fec_enable; + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + lt_settings->lttpr_mode); + + dp_decide_training_settings( + link, + link_setting, + lt_settings); + + dp_get_lttpr_mode_override(link, <_settings->lttpr_mode); + + status = dpcd_configure_channel_coding(link, lt_settings); + if (status != DC_OK && link->is_hpd_pending) + return LINK_TRAINING_ABORT; + + /* Configure lttpr mode */ + status = dpcd_configure_lttpr_mode(link, lt_settings); + if (status != DC_OK && link->is_hpd_pending) + return LINK_TRAINING_ABORT; + + /* Set link rate, lane count and spread. */ + status = dpcd_set_link_settings(link, lt_settings); + if (status != DC_OK && link->is_hpd_pending) + return LINK_TRAINING_ABORT; + + if (link->preferred_training_settings.fec_enable != NULL) + fec_enable = *link->preferred_training_settings.fec_enable; + else + fec_enable = true; + status = dp_set_fec_ready(link, link_res, fec_enable); + if (status != DC_OK && link->is_hpd_pending) + return LINK_TRAINING_ABORT; + + return LINK_TRAINING_SUCCESS; +} + +static enum dc_status core_link_send_set_config( + struct dc_link *link, + uint8_t msg_type, + uint8_t msg_data) +{ + struct set_config_cmd_payload payload; + enum set_config_status set_config_result = SET_CONFIG_PENDING; + + /* prepare set_config payload */ + payload.msg_type = msg_type; + payload.msg_data = msg_data; + + if (!link->ddc->ddc_pin && !link->aux_access_disabled && + (dm_helpers_dmub_set_config_sync(link->ctx, + link, &payload, &set_config_result) == -1)) { + return DC_ERROR_UNEXPECTED; + } + + /* set_config should return ACK if successful */ + return (set_config_result == SET_CONFIG_ACK_RECEIVED) ? DC_OK : DC_ERROR_UNEXPECTED; +} + +/* Build SET_CONFIG message data payload for specified message type. */ +static uint8_t dpia_build_set_config_data( + enum dpia_set_config_type type, + struct dc_link *link, + struct link_training_settings *lt_settings) +{ + union dpia_set_config_data data; + + data.raw = 0; + + switch (type) { + case DPIA_SET_CFG_SET_LINK: + data.set_link.mode = lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0; + break; + case DPIA_SET_CFG_SET_PHY_TEST_MODE: + break; + case DPIA_SET_CFG_SET_VSPE: + /* Assume all lanes have same drive settings. */ + data.set_vspe.swing = lt_settings->hw_lane_settings[0].VOLTAGE_SWING; + data.set_vspe.pre_emph = lt_settings->hw_lane_settings[0].PRE_EMPHASIS; + data.set_vspe.max_swing_reached = + lt_settings->hw_lane_settings[0].VOLTAGE_SWING == VOLTAGE_SWING_MAX_LEVEL ? 
1 : 0; + data.set_vspe.max_pre_emph_reached = + lt_settings->hw_lane_settings[0].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 1 : 0; + break; + default: + ASSERT(false); /* Message type not supported by helper function. */ + break; + } + + return data.raw; +} + +/* Convert DC training pattern to DPIA training stage. */ +static enum dc_status convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern tps, enum dpia_set_config_ts *ts) +{ + enum dc_status status = DC_OK; + + switch (tps) { + case DP_TRAINING_PATTERN_SEQUENCE_1: + *ts = DPIA_TS_TPS1; + break; + case DP_TRAINING_PATTERN_SEQUENCE_2: + *ts = DPIA_TS_TPS2; + break; + case DP_TRAINING_PATTERN_SEQUENCE_3: + *ts = DPIA_TS_TPS3; + break; + case DP_TRAINING_PATTERN_SEQUENCE_4: + *ts = DPIA_TS_TPS4; + break; + case DP_TRAINING_PATTERN_VIDEOIDLE: + *ts = DPIA_TS_DPRX_DONE; + break; + default: /* TPS not supported by helper function. */ + ASSERT(false); + *ts = DPIA_TS_DPRX_DONE; + status = DC_UNSUPPORTED_VALUE; + break; + } + + return status; +} + +/* Write training pattern to DPCD. */ +static enum dc_status dpcd_set_lt_pattern( + struct dc_link *link, + enum dc_dp_training_pattern pattern, + uint32_t hop) +{ + union dpcd_training_pattern dpcd_pattern = {0}; + uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; + enum dc_status status; + + if (hop != DPRX) + dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1)); + + /* DpcdAddress_TrainingPatternSet */ + dpcd_pattern.v1_4.TRAINING_PATTERN_SET = + dp_training_pattern_to_dpcd_training_pattern(link, pattern); + + dpcd_pattern.v1_4.SCRAMBLING_DISABLE = + dp_initialize_scrambling_data_symbols(link, pattern); + + if (hop != DPRX) { + DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n", + __func__, + hop, + dpcd_tps_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } else { + DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n", + __func__, + dpcd_tps_offset, + dpcd_pattern.v1_4.TRAINING_PATTERN_SET); + } + + status = core_link_write_dpcd( + link, + dpcd_tps_offset, + &dpcd_pattern.raw, + sizeof(dpcd_pattern.raw)); + + return status; +} + +/* Execute clock recovery phase of link training for specified hop in display + * path.in non-transparent mode: + * - Driver issues both DPCD and SET_CONFIG transactions. + * - TPS1 is transmitted for any hops downstream of DPOA. + * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA. + * - CR for the first hop (DPTX-to-DPIA) is assumed to be successful. + * + * @param link DPIA link being trained. + * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). + * @param hop Hop in display path. DPRX = 0. + */ +static enum link_training_result dpia_training_cr_non_transparent( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t hop) +{ + enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; + uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ + enum dc_status status; + uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */ + uint32_t retry_count = 0; + uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL; /* From DP spec, CR read interval is always 100us. 
*/ + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + uint8_t set_cfg_data; + enum dpia_set_config_ts ts; + + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery. + * Fix inherited from perform_clock_recovery_sequence() - + * the DP equivalent of this function: + * Required for Synaptics MST hub which can put the LT in + * infinite loop by switching the VS between level 0 and level 1 + * continuously. + */ + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + /* DPTX-to-DPIA */ + if (hop == repeater_cnt) { + /* Send SET_CONFIG(SET_LINK:LC,LR,LTTPR) to notify DPOA that + * non-transparent link training has started. + * This also enables the transmission of clk_sync packets. + */ + set_cfg_data = dpia_build_set_config_data( + DPIA_SET_CFG_SET_LINK, + link, + lt_settings); + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_LINK, + set_cfg_data); + /* CR for this hop is considered successful as long as + * SET_CONFIG message is acknowledged by DPOA. + */ + if (status == DC_OK) + result = LINK_TRAINING_SUCCESS; + else + result = LINK_TRAINING_ABORT; + break; + } + + /* DPOA-to-x */ + /* Instruct DPOA to transmit TPS1 then update DPCD. */ + if (retry_count == 0) { + status = convert_trng_ptn_to_trng_stg(lt_settings->pattern_for_cr, &ts); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_TRAINING, + ts); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, hop); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + } + + /* Update DPOA drive settings then DPCD. DPOA does only adjusts + * drive settings for hops immediately downstream. + */ + if (hop == repeater_cnt - 1) { + set_cfg_data = dpia_build_set_config_data( + DPIA_SET_CFG_SET_VSPE, + link, + lt_settings); + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_VSPE, + set_cfg_data); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + } + status = dpcd_set_lane_settings(link, lt_settings, hop); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + + dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); + + /* Read status and adjustment requests from DPCD. */ + status = dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + hop); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + + /* Check if clock recovery successful. */ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) { + DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__); + result = LINK_TRAINING_SUCCESS; + break; + } + + result = dp_get_cr_failure(lane_count, dpcd_lane_status); + + if (dp_is_max_vs_reached(lt_settings)) + break; + + /* Count number of attempts with same drive settings. + * Note: settings are the same for all lanes, + * so comparing first lane is sufficient. 
+ */ + if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET == + dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE)) + retries_cr++; + else + retries_cr = 0; + + /* Update VS/PE. */ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, + lt_settings->dpcd_lane_settings); + retry_count++; + } + + /* Abort link training if clock recovery failed due to HPD unplug. */ + if (link->is_hpd_pending) + result = LINK_TRAINING_ABORT; + + DC_LOG_HW_LINK_TRAINING( + "%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n - status(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + hop, + result, + retry_count, + status); + + return result; +} + +/* Execute clock recovery phase of link training in transparent LTTPR mode: + * - Driver only issues DPCD transactions and leaves USB4 tunneling (SET_CONFIG) messages to DPIA. + * - Driver writes TPS1 to DPCD to kick off training. + * - Clock recovery (CR) for link is handled by DPOA, which reports result to DPIA on completion. + * - DPIA communicates result to driver by updating CR status when driver reads DPCD. + * + * @param link DPIA link being trained. + * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). + */ +static enum link_training_result dpia_training_cr_transparent( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; + enum dc_status status; + uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */ + uint32_t retry_count = 0; + uint32_t wait_time_microsec = lt_settings->cr_pattern_time; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery. + * Fix inherited from perform_clock_recovery_sequence() - + * the DP equivalent of this function: + * Required for Synaptics MST hub which can put the LT in + * infinite loop by switching the VS between level 0 and level 1 + * continuously. + */ + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + /* Write TPS1 (not VS or PE) to DPCD to start CR phase. + * DPIA sends SET_CONFIG(SET_LINK) to notify DPOA to + * start link training. + */ + if (retry_count == 0) { + status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + } + + dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); + + /* Read status and adjustment requests from DPCD. */ + status = dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + + /* Check if clock recovery successful. */ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) { + DC_LOG_HW_LINK_TRAINING("%s: Clock recovery OK\n", __func__); + result = LINK_TRAINING_SUCCESS; + break; + } + + result = dp_get_cr_failure(lane_count, dpcd_lane_status); + + if (dp_is_max_vs_reached(lt_settings)) + break; + + /* Count number of attempts with same drive settings. 
+ * Note: settings are the same for all lanes, + * so comparing first lane is sufficient. + */ + if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET == + dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE)) + retries_cr++; + else + retries_cr = 0; + + /* Update VS/PE. */ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + retry_count++; + } + + /* Abort link training if clock recovery failed due to HPD unplug. */ + if (link->is_hpd_pending) + result = LINK_TRAINING_ABORT; + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + DPRX, + result, + retry_count); + + return result; +} + +/* Execute clock recovery phase of link training for specified hop in display + * path. + * + * @param link DPIA link being trained. + * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). + * @param hop Hop in display path. DPRX = 0. + */ +static enum link_training_result dpia_training_cr_phase( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t hop) +{ + enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0; + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + result = dpia_training_cr_non_transparent(link, link_res, lt_settings, hop); + else + result = dpia_training_cr_transparent(link, link_res, lt_settings); + + return result; +} + +/* Return status read interval during equalization phase. */ +static uint32_t dpia_get_eq_aux_rd_interval( + const struct dc_link *link, + const struct link_training_settings *lt_settings, + uint32_t hop) +{ + uint32_t wait_time_microsec; + + if (hop == DPRX) + wait_time_microsec = lt_settings->eq_pattern_time; + else + wait_time_microsec = + dp_translate_training_aux_read_interval( + link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]); + + /* Check debug option for extending aux read interval. */ + if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval) + wait_time_microsec = DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US; + + return wait_time_microsec; +} + +/* Execute equalization phase of link training for specified hop in display + * path in non-transparent mode: + * - driver issues both DPCD and SET_CONFIG transactions. + * - TPSx is transmitted for any hops downstream of DPOA. + * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA. + * - EQ for the first hop (DPTX-to-DPIA) is assumed to be successful. + * - DPRX EQ only reported successful when both DPRX and DPIA requirements (clk sync packets sent) fulfilled. + * + * @param link DPIA link being trained. + * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). + * @param hop Hop in display path. DPRX = 0. + */ +static enum link_training_result dpia_training_eq_non_transparent( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t hop) +{ + enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; + uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. 
*/ + uint32_t retries_eq = 0; + enum dc_status status; + enum dc_dp_training_pattern tr_pattern; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + uint8_t set_cfg_data; + enum dpia_set_config_ts ts; + + /* Training pattern is TPS4 for repeater; + * TPS2/3/4 for DPRX depending on what it supports. + */ + if (hop == DPRX) + tr_pattern = lt_settings->pattern_for_eq; + else + tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; + + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { + + /* DPTX-to-DPIA equalization always successful. */ + if (hop == repeater_cnt) { + result = LINK_TRAINING_SUCCESS; + break; + } + + /* Instruct DPOA to transmit TPSn then update DPCD. */ + if (retries_eq == 0) { + status = convert_trng_ptn_to_trng_stg(tr_pattern, &ts); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_TRAINING, + ts); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + status = dpcd_set_lt_pattern(link, tr_pattern, hop); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + } + + /* Update DPOA drive settings then DPCD. DPOA only adjusts + * drive settings for hop immediately downstream. + */ + if (hop == repeater_cnt - 1) { + set_cfg_data = dpia_build_set_config_data( + DPIA_SET_CFG_SET_VSPE, + link, + lt_settings); + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_VSPE, + set_cfg_data); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + } + status = dpcd_set_lane_settings(link, lt_settings, hop); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + + /* Extend wait time on second equalisation attempt on final hop to + * ensure clock sync packets have been sent. + */ + if (hop == DPRX && retries_eq == 1) + wait_time_microsec = max(wait_time_microsec, (uint32_t) DPIA_CLK_SYNC_DELAY); + else + wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, hop); + + dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); + + /* Read status and adjustment requests from DPCD. */ + status = dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + hop); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + + /* CR can still fail during EQ phase. Fail training if CR fails. */ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { + result = LINK_TRAINING_EQ_FAIL_CR; + break; + } + + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) { + result = LINK_TRAINING_SUCCESS; + break; + } + + /* Update VS/PE. */ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + + /* Abort link training if equalization failed due to HPD unplug. 
*/ + if (link->is_hpd_pending) + result = LINK_TRAINING_ABORT; + + DC_LOG_HW_LINK_TRAINING( + "%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n - status(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + hop, + result, + retries_eq, + status); + + return result; +} + +/* Execute equalization phase of link training for specified hop in display + * path in transparent LTTPR mode: + * - driver only issues DPCD transactions leaves USB4 tunneling (SET_CONFIG) messages to DPIA. + * - driver writes TPSx to DPCD to notify DPIA that is in equalization phase. + * - equalization (EQ) for link is handled by DPOA, which reports result to DPIA on completion. + * - DPIA communicates result to driver by updating EQ status when driver reads DPCD. + * + * @param link DPIA link being trained. + * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). + * @param hop Hop in display path. DPRX = 0. + */ +static enum link_training_result dpia_training_eq_transparent( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ; + uint32_t retries_eq = 0; + enum dc_status status; + enum dc_dp_training_pattern tr_pattern = lt_settings->pattern_for_eq; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX); + + for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) { + + if (retries_eq == 0) { + status = dpcd_set_lt_pattern(link, tr_pattern, DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + } + + dp_wait_for_training_aux_rd_interval(link, wait_time_microsec); + + /* Read status and adjustment requests from DPCD. */ + status = dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + DPRX); + if (status != DC_OK) { + result = LINK_TRAINING_ABORT; + break; + } + + /* CR can still fail during EQ phase. Fail training if CR fails. */ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { + result = LINK_TRAINING_EQ_FAIL_CR; + break; + } + + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status)) { + /* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4 + * has to share encoders unlike DP and USBC + */ + if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) { + result = LINK_TRAINING_SUCCESS; + break; + } + } + + /* Update VS/PE. */ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + + /* Abort link training if equalization failed due to HPD unplug. */ + if (link->is_hpd_pending) + result = LINK_TRAINING_ABORT; + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + DPRX, + result, + retries_eq); + + return result; +} + +/* Execute equalization phase of link training for specified hop in display + * path. + * + * @param link DPIA link being trained. 
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis). + * @param hop Hop in display path. DPRX = 0. + */ +static enum link_training_result dpia_training_eq_phase( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings, + uint32_t hop) +{ + enum link_training_result result; + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + result = dpia_training_eq_non_transparent(link, link_res, lt_settings, hop); + else + result = dpia_training_eq_transparent(link, link_res, lt_settings); + + return result; +} + +/* End training of specified hop in display path. */ +static enum dc_status dpcd_clear_lt_pattern( + struct dc_link *link, + uint32_t hop) +{ + union dpcd_training_pattern dpcd_pattern = {0}; + uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; + enum dc_status status; + + if (hop != DPRX) + dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1)); + + status = core_link_write_dpcd( + link, + dpcd_tps_offset, + &dpcd_pattern.raw, + sizeof(dpcd_pattern.raw)); + + return status; +} + +/* End training of specified hop in display path. + * + * In transparent LTTPR mode: + * - driver clears training pattern for the specified hop in DPCD. + * In non-transparent LTTPR mode: + * - in addition to clearing training pattern, driver issues USB4 tunneling + * (SET_CONFIG) messages to notify DPOA when training is done for first hop + * (DPTX-to-DPIA) and last hop (DPRX). + * + * @param link DPIA link being trained. + * @param hop Hop in display path. DPRX = 0. + */ +static enum link_training_result dpia_training_end( + struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t hop) +{ + enum link_training_result result = LINK_TRAINING_SUCCESS; + uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ + enum dc_status status; + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + if (hop == repeater_cnt) { /* DPTX-to-DPIA */ + /* Send SET_CONFIG(SET_TRAINING:0xff) to notify DPOA that + * DPTX-to-DPIA hop trained. No DPCD write needed for first hop. + */ + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_TRAINING, + DPIA_TS_UFP_DONE); + if (status != DC_OK) + result = LINK_TRAINING_ABORT; + } else { /* DPOA-to-x */ + /* Write 0x0 to TRAINING_PATTERN_SET */ + status = dpcd_clear_lt_pattern(link, hop); + if (status != DC_OK) + result = LINK_TRAINING_ABORT; + } + + /* Notify DPOA that non-transparent link training of DPRX done. */ + if (hop == DPRX && result != LINK_TRAINING_ABORT) { + status = core_link_send_set_config( + link, + DPIA_SET_CFG_SET_TRAINING, + DPIA_TS_DPRX_DONE); + if (status != DC_OK) + result = LINK_TRAINING_ABORT; + } + + } else { /* non-LTTPR or transparent LTTPR. */ + + /* Write 0x0 to TRAINING_PATTERN_SET */ + status = dpcd_clear_lt_pattern(link, hop); + if (status != DC_OK) + result = LINK_TRAINING_ABORT; + + } + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) end\n - hop(%d)\n - result(%d)\n - LTTPR mode(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + hop, + result, + lt_settings->lttpr_mode); + + return result; +} + +/* When aborting training of specified hop in display path, clean up by: + * - Attempting to clear DPCD TRAINING_PATTERN_SET, LINK_BW_SET and LANE_COUNT_SET. + * - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0. 
+ * + * @param link DPIA link being trained. + * @param hop Hop in display path. DPRX = 0. + */ +static void dpia_training_abort( + struct dc_link *link, + struct link_training_settings *lt_settings, + uint32_t hop) +{ + uint8_t data = 0; + uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET; + + DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n", + __func__, + link->link_id.enum_id - ENUM_ID_1, + lt_settings->lttpr_mode, + link->is_hpd_pending); + + /* Abandon clean-up if sink unplugged. */ + if (link->is_hpd_pending) + return; + + if (hop != DPRX) + dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 + + ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1)); + + core_link_write_dpcd(link, dpcd_tps_offset, &data, 1); + core_link_write_dpcd(link, DP_LINK_BW_SET, &data, 1); + core_link_write_dpcd(link, DP_LANE_COUNT_SET, &data, 1); + core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data); +} + +enum link_training_result dpia_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_setting, + bool skip_video_pattern) +{ + enum link_training_result result; + struct link_training_settings lt_settings = {0}; + uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */ + int8_t repeater_id; /* Current hop. */ + + struct dc_link_settings link_settings = *link_setting; // non-const copy to pass in + + lt_settings.lttpr_mode = dp_decide_lttpr_mode(link, &link_settings); + + /* Configure link as prescribed in link_setting and set LTTPR mode. */ + result = dpia_configure_link(link, link_res, link_setting, &lt_settings); + if (result != LINK_TRAINING_SUCCESS) + return result; + + if (lt_settings.lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + /* Train each hop in turn starting with the one closest to DPTX. + * In transparent or non-LTTPR mode, train only the final hop (DPRX). + */ + for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) { + /* Clock recovery. */ + result = dpia_training_cr_phase(link, link_res, &lt_settings, repeater_id); + if (result != LINK_TRAINING_SUCCESS) + break; + + /* Equalization. */ + result = dpia_training_eq_phase(link, link_res, &lt_settings, repeater_id); + if (result != LINK_TRAINING_SUCCESS) + break; + + /* Stop training hop. */ + result = dpia_training_end(link, &lt_settings, repeater_id); + if (result != LINK_TRAINING_SUCCESS) + break; + } + + /* Double-check link status if training successful; gracefully abort + * training of current hop if training failed due to message tunneling + * failure; end training of hop if training ended conventionally and + * falling back to lower bandwidth settings possible. + */ + if (result == LINK_TRAINING_SUCCESS) { + fsleep(5000); + if (!link->is_automated) + result = dp_check_link_loss_status(link, &lt_settings); + } else if (result == LINK_TRAINING_ABORT) + dpia_training_abort(link, &lt_settings, repeater_id); + else + dpia_training_end(link, &lt_settings, repeater_id); + + return result; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h new file mode 100644 index 0000000000..b39fb9faf1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.h @@ -0,0 +1,41 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_TRAINING_DPIA_H__ +#define __DC_LINK_DP_TRAINING_DPIA_H__ +#include "link_dp_training.h" + +/* Train DP tunneling link for USB4 DPIA display endpoint. + * DPIA equivalent of dc_link_dp_perform_link_training. + * Aborts link training upon detection of sink unplug. + */ +enum link_training_result dpia_perform_link_training( + struct dc_link *link, + const struct link_resource *link_res, + const struct dc_link_settings *link_setting, + bool skip_video_pattern); + +#endif /* __DC_LINK_DP_TRAINING_DPIA_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c new file mode 100644 index 0000000000..68096d12f5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c @@ -0,0 +1,891 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements 8b/10b link training specially modified to support an + * embedded retimer chip. This retimer chip is referred to as fixed vs pe retimer. + * Unlike a native dp connection this chip requires a modified link training + * protocol based on 8b/10b link training. 
Since this is a non standard sequence + * and we must support this hardware, we decided to isolate it in its own + * training sequence inside its own file. + */ +#include "link_dp_training_fixed_vs_pe_retimer.h" +#include "link_dp_training_8b_10b.h" +#include "link_dpcd.h" +#include "link_dp_phy.h" +#include "link_dp_capability.h" +#include "link_ddc.h" + +#define DC_LOGGER \ + link->ctx->logger + +void dp_fixed_vs_pe_read_lane_adjust( + struct dc_link *link, + union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]) +{ + const uint8_t vendor_lttpr_write_data_vs[3] = {0x0, 0x53, 0x63}; + const uint8_t vendor_lttpr_write_data_pe[3] = {0x0, 0x54, 0x63}; + uint8_t dprx_vs = 0; + uint8_t dprx_pe = 0; + uint8_t lane; + + /* W/A to read lane settings requested by DPRX */ + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + + link_query_fixed_vs_pe_retimer(link->ddc, &dprx_vs, 1); + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); + + link_query_fixed_vs_pe_retimer(link->ddc, &dprx_pe, 1); + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET = (dprx_vs >> (2 * lane)) & 0x3; + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET = (dprx_pe >> (2 * lane)) & 0x3; + } +} + + +void dp_fixed_vs_pe_set_retimer_lane_settings( + struct dc_link *link, + const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], + uint8_t lane_count) +{ + const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; + uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; + uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + uint8_t lane = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Force LTTPR to output desired VS and PE */ + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); +} + +static enum link_training_result perform_fixed_vs_pe_nontransparent_training_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + enum link_training_result status = LINK_TRAINING_SUCCESS; + uint8_t lane = 0; + uint8_t toggle_rate = 0x6; + uint8_t target_rate = 0x6; + bool apply_toggle_rate_wa = false; + uint8_t repeater_cnt; + uint8_t repeater_id; + + /* Fixed VS/PE specific: Force CR AUX RD Interval to at least 16ms */ + if (lt_settings->cr_pattern_time < 16000) + lt_settings->cr_pattern_time = 16000; + + /* Fixed VS/PE specific: Toggle link rate */ + apply_toggle_rate_wa = ((link->vendor_specific_lttpr_link_rate_wa == target_rate) || (link->vendor_specific_lttpr_link_rate_wa == 0)); + target_rate = get_dpcd_link_rate(<_settings->link_settings); + toggle_rate = (target_rate == 0x6) ? 0xA : 0x6; + + if (apply_toggle_rate_wa) + lt_settings->link_settings.link_rate = toggle_rate; + + if (link->ctx->dc->work_arounds.lt_early_cr_pattern) + start_clock_recovery_pattern_early(link, link_res, lt_settings, DPRX); + + /* 1. 
set link rate, lane count and spread. */ + dpcd_set_link_settings(link, lt_settings); + + /* Fixed VS/PE specific: Toggle link rate back*/ + if (apply_toggle_rate_wa) { + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &target_rate, + 1); + } + + link->vendor_specific_lttpr_link_rate_wa = target_rate; + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + + /* 2. perform link training (set link training done + * to false is done as well) + */ + repeater_cnt = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + + for (repeater_id = repeater_cnt; (repeater_id > 0 && status == LINK_TRAINING_SUCCESS); + repeater_id--) { + status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) { + repeater_training_done(link, repeater_id); + break; + } + + status = perform_8b_10b_channel_equalization_sequence(link, + link_res, + lt_settings, + repeater_id); + + repeater_training_done(link, repeater_id); + + if (status != LINK_TRAINING_SUCCESS) + break; + + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { + lt_settings->dpcd_lane_settings[lane].raw = 0; + lt_settings->hw_lane_settings[lane].VOLTAGE_SWING = 0; + lt_settings->hw_lane_settings[lane].PRE_EMPHASIS = 0; + } + } + } + + if (status == LINK_TRAINING_SUCCESS) { + status = perform_8b_10b_clock_recovery_sequence(link, link_res, lt_settings, DPRX); + if (status == LINK_TRAINING_SUCCESS) { + status = perform_8b_10b_channel_equalization_sequence(link, + link_res, + lt_settings, + DPRX); + } + } + + return status; +} + + +enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; + const uint8_t offset = dp_parse_lttpr_repeater_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; + const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x68}; + uint32_t pre_disable_intercept_delay_ms = 0; + uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; + uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; + enum link_training_result status = LINK_TRAINING_SUCCESS; + uint8_t lane = 0; + union down_spread_ctrl downspread = {0}; + union lane_count_set lane_count_set = {0}; + uint8_t toggle_rate; + uint8_t rate; + + /* Only 8b/10b is supported */ + ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING); + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); + return status; + } + + if (offset != 0xFF) { + if (offset == 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; + + /* Certain display and cable configuration require extra delay */ + } else if (offset > 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } + } + + /* Vendor specific: 
Reset lane settings */ + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); + + /* Vendor specific: Enable intercept */ + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); + + + /* 1. set link rate, lane count and spread. */ + + downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); + + lane_count_set.bits.LANE_COUNT_SET = + lt_settings->link_settings.lane_count; + + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + + if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = + link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; + } + + core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &downspread.raw, sizeof(downspread)); + + core_link_write_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, 1); + + rate = get_dpcd_link_rate(<_settings->link_settings); + + /* Vendor specific: Toggle link rate */ + toggle_rate = (rate == 0x6) ? 0xA : 0x6; + + if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) { + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &toggle_rate, + 1); + } + + link->vendor_specific_lttpr_link_rate_wa = rate; + + core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + + DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_BW_SET, + lt_settings->link_settings.link_rate, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); + } + + /* 2. Perform link training */ + + /* Perform Clock Recovery Sequence */ + if (status == LINK_TRAINING_SUCCESS) { + const uint8_t max_vendor_dpcd_retries = 10; + uint32_t retries_cr; + uint32_t retry_count; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + uint8_t i = 0; + + retries_cr = 0; + retry_count = 0; + + memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); + memset(&dpcd_lane_status_updated, '\0', + sizeof(dpcd_lane_status_updated)); + + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + + /* 1. 
call HWSS to set lane settings */ + dp_set_hw_lane_settings( + link, + link_res, + lt_settings, + 0); + + /* 2. update DPCD of the receiver */ + if (!retry_count) { + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration. + */ + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + lt_settings->pattern_for_cr, + 0); + /* Vendor specific: Disable intercept */ + for (i = 0; i < max_vendor_dpcd_retries; i++) { + if (pre_disable_intercept_delay_ms != 0) + msleep(pre_disable_intercept_delay_ms); + if (link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_intercept_dis[0], + sizeof(vendor_lttpr_write_data_intercept_dis))) + break; + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_intercept_en[0], + sizeof(vendor_lttpr_write_data_intercept_en)); + } + } else { + vendor_lttpr_write_data_vs[3] = 0; + vendor_lttpr_write_data_pe[3] = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Vendor specific: Update VS and PE to DPRX requested value */ + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); + + dpcd_set_lane_settings( + link, + lt_settings, + 0); + } + + /* 3. wait receiver to lock-on*/ + wait_time_microsec = lt_settings->cr_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested drive + * settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + 0); + + /* 5. check CR done*/ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) { + status = LINK_TRAINING_SUCCESS; + break; + } + + /* 6. max VS reached*/ + if (dp_is_max_vs_reached(lt_settings)) + break; + + /* 7. same lane settings */ + /* Note: settings are the same for all lanes, + * so comparing first lane is sufficient + */ + if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + retries_cr++; + else + retries_cr = 0; + + /* 8. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + retry_count++; + } + + if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { + ASSERT(0); + DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", + __func__, + LINK_TRAINING_MAX_CR_RETRY); + + } + + status = dp_get_cr_failure(lane_count, dpcd_lane_status); + } + + /* Perform Channel EQ Sequence */ + if (status == LINK_TRAINING_SUCCESS) { + enum dc_dp_training_pattern tr_pattern; + uint32_t retries_ch_eq; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + /* Note: also check that TPS4 is a supported feature*/ + tr_pattern = lt_settings->pattern_for_eq; + + dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); + + status = LINK_TRAINING_EQ_FAIL_EQ; + + for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; + retries_ch_eq++) { + + dp_set_hw_lane_settings(link, link_res, lt_settings, 0); + + vendor_lttpr_write_data_vs[3] = 0; + vendor_lttpr_write_data_pe[3] = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Vendor specific: Update VS and PE to DPRX requested value */ + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); + + /* 2. update DPCD*/ + if (!retries_ch_eq) + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration + */ + + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + tr_pattern, 0); + else + dpcd_set_lane_settings(link, lt_settings, 0); + + /* 3. wait for receiver to lock-on*/ + wait_time_microsec = lt_settings->eq_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested + * drive settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + 0); + + /* 5. check CR done*/ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { + status = LINK_TRAINING_EQ_FAIL_CR; + break; + } + + /* 6. check CHEQ done*/ + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) { + status = LINK_TRAINING_SUCCESS; + break; + } + + /* 7. 
update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + } + + return status; +} + +enum link_training_result dp_perform_fixed_vs_pe_training_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings) +{ + const uint8_t vendor_lttpr_write_data_reset[4] = {0x1, 0x50, 0x63, 0xFF}; + const uint8_t offset = dp_parse_lttpr_repeater_count( + link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + const uint8_t vendor_lttpr_write_data_intercept_en[4] = {0x1, 0x55, 0x63, 0x0}; + const uint8_t vendor_lttpr_write_data_intercept_dis[4] = {0x1, 0x55, 0x63, 0x6E}; + const uint8_t vendor_lttpr_write_data_adicora_eq1[4] = {0x1, 0x55, 0x63, 0x2E}; + const uint8_t vendor_lttpr_write_data_adicora_eq2[4] = {0x1, 0x55, 0x63, 0x01}; + const uint8_t vendor_lttpr_write_data_adicora_eq3[4] = {0x1, 0x55, 0x63, 0x68}; + uint32_t pre_disable_intercept_delay_ms = 0; + uint8_t vendor_lttpr_write_data_vs[4] = {0x1, 0x51, 0x63, 0x0}; + uint8_t vendor_lttpr_write_data_pe[4] = {0x1, 0x52, 0x63, 0x0}; + const uint8_t vendor_lttpr_write_data_4lane_1[4] = {0x1, 0x6E, 0xF2, 0x19}; + const uint8_t vendor_lttpr_write_data_4lane_2[4] = {0x1, 0x6B, 0xF2, 0x01}; + const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18}; + const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03}; + const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06}; + enum link_training_result status = LINK_TRAINING_SUCCESS; + uint8_t lane = 0; + union down_spread_ctrl downspread = {0}; + union lane_count_set lane_count_set = {0}; + uint8_t toggle_rate; + uint8_t rate; + + /* Only 8b/10b is supported */ + ASSERT(link_dp_get_encoding_format(<_settings->link_settings) == + DP_8b_10b_ENCODING); + + if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) { + status = perform_fixed_vs_pe_nontransparent_training_sequence(link, link_res, lt_settings); + return status; + } + + if (offset != 0xFF) { + if (offset == 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa; + + /* Certain display and cable configuration require extra delay */ + } else if (offset > 2) { + pre_disable_intercept_delay_ms = link->dc->debug.fixed_vs_aux_delay_config_wa * 2; + } + } + + /* Vendor specific: Reset lane settings */ + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_reset[0], sizeof(vendor_lttpr_write_data_reset)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); + + /* Vendor specific: Enable intercept */ + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_intercept_en[0], sizeof(vendor_lttpr_write_data_intercept_en)); + + /* 1. set link rate, lane count and spread. 
*/ + + downspread.raw = (uint8_t)(lt_settings->link_settings.link_spread); + + lane_count_set.bits.LANE_COUNT_SET = + lt_settings->link_settings.lane_count; + + lane_count_set.bits.ENHANCED_FRAMING = lt_settings->enhanced_framing; + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = 0; + + + if (lt_settings->pattern_for_eq < DP_TRAINING_PATTERN_SEQUENCE_4) { + lane_count_set.bits.POST_LT_ADJ_REQ_GRANTED = + link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED; + } + + core_link_write_dpcd(link, DP_DOWNSPREAD_CTRL, + &downspread.raw, sizeof(downspread)); + + core_link_write_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, 1); + + rate = get_dpcd_link_rate(<_settings->link_settings); + + /* Vendor specific: Toggle link rate */ + toggle_rate = (rate == 0x6) ? 0xA : 0x6; + + if (link->vendor_specific_lttpr_link_rate_wa == rate || link->vendor_specific_lttpr_link_rate_wa == 0) { + core_link_write_dpcd( + link, + DP_LINK_BW_SET, + &toggle_rate, + 1); + } + + link->vendor_specific_lttpr_link_rate_wa = rate; + + core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1); + + DC_LOG_HW_LINK_TRAINING("%s\n %x rate = %x\n %x lane = %x framing = %x\n %x spread = %x\n", + __func__, + DP_LINK_BW_SET, + lt_settings->link_settings.link_rate, + DP_LANE_COUNT_SET, + lt_settings->link_settings.lane_count, + lt_settings->enhanced_framing, + DP_DOWNSPREAD_CTRL, + lt_settings->link_settings.link_spread); + + if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) { + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_2[0], sizeof(vendor_lttpr_write_data_4lane_2)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_3[0], sizeof(vendor_lttpr_write_data_4lane_3)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_4[0], sizeof(vendor_lttpr_write_data_4lane_4)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_4lane_5[0], sizeof(vendor_lttpr_write_data_4lane_5)); + } + + /* 2. Perform link training */ + + /* Perform Clock Recovery Sequence */ + if (status == LINK_TRAINING_SUCCESS) { + const uint8_t max_vendor_dpcd_retries = 10; + uint32_t retries_cr; + uint32_t retry_count; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX]; + union lane_align_status_updated dpcd_lane_status_updated; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + uint8_t i = 0; + + retries_cr = 0; + retry_count = 0; + + memset(&dpcd_lane_status, '\0', sizeof(dpcd_lane_status)); + memset(&dpcd_lane_status_updated, '\0', + sizeof(dpcd_lane_status_updated)); + + while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) && + (retry_count < LINK_TRAINING_MAX_CR_RETRY)) { + + + /* 1. call HWSS to set lane settings */ + dp_set_hw_lane_settings( + link, + link_res, + lt_settings, + 0); + + /* 2. update DPCD of the receiver */ + if (!retry_count) { + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration. 
+ */ + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + lt_settings->pattern_for_cr, + 0); + /* Vendor specific: Disable intercept */ + for (i = 0; i < max_vendor_dpcd_retries; i++) { + if (pre_disable_intercept_delay_ms != 0) + msleep(pre_disable_intercept_delay_ms); + if (link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_intercept_dis[0], + sizeof(vendor_lttpr_write_data_intercept_dis))) + break; + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_intercept_en[0], + sizeof(vendor_lttpr_write_data_intercept_en)); + } + } else { + vendor_lttpr_write_data_vs[3] = 0; + vendor_lttpr_write_data_pe[3] = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Vendor specific: Update VS and PE to DPRX requested value */ + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); + + dpcd_set_lane_settings( + link, + lt_settings, + 0); + } + + /* 3. wait receiver to lock-on*/ + wait_time_microsec = lt_settings->cr_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested drive + * settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + 0); + + /* 5. check CR done*/ + if (dp_is_cr_done(lane_count, dpcd_lane_status)) { + status = LINK_TRAINING_SUCCESS; + break; + } + + /* 6. max VS reached*/ + if (dp_is_max_vs_reached(lt_settings)) + break; + + /* 7. same lane settings */ + /* Note: settings are the same for all lanes, + * so comparing first lane is sufficient + */ + if (lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == + dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) + retries_cr++; + else + retries_cr = 0; + + /* 8. update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + retry_count++; + } + + if (retry_count >= LINK_TRAINING_MAX_CR_RETRY) { + ASSERT(0); + DC_LOG_ERROR("%s: Link Training Error, could not get CR after %d tries. 
Possibly voltage swing issue", + __func__, + LINK_TRAINING_MAX_CR_RETRY); + + } + + status = dp_get_cr_failure(lane_count, dpcd_lane_status); + } + + /* Perform Channel EQ Sequence */ + if (status == LINK_TRAINING_SUCCESS) { + enum dc_dp_training_pattern tr_pattern; + uint32_t retries_ch_eq; + uint32_t wait_time_microsec; + enum dc_lane_count lane_count = lt_settings->link_settings.lane_count; + union lane_align_status_updated dpcd_lane_status_updated = {0}; + union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0}; + union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0}; + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_adicora_eq1[0], + sizeof(vendor_lttpr_write_data_adicora_eq1)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_adicora_eq2[0], + sizeof(vendor_lttpr_write_data_adicora_eq2)); + + + /* Note: also check that TPS4 is a supported feature*/ + tr_pattern = lt_settings->pattern_for_eq; + + dp_set_hw_training_pattern(link, link_res, tr_pattern, 0); + + status = LINK_TRAINING_EQ_FAIL_EQ; + + for (retries_ch_eq = 0; retries_ch_eq <= LINK_TRAINING_MAX_RETRY_COUNT; + retries_ch_eq++) { + + dp_set_hw_lane_settings(link, link_res, lt_settings, 0); + + vendor_lttpr_write_data_vs[3] = 0; + vendor_lttpr_write_data_pe[3] = 0; + + for (lane = 0; lane < lane_count; lane++) { + vendor_lttpr_write_data_vs[3] |= + lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET << (2 * lane); + vendor_lttpr_write_data_pe[3] |= + lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET << (2 * lane); + } + + /* Vendor specific: Update VS and PE to DPRX requested value */ + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_vs[0], sizeof(vendor_lttpr_write_data_vs)); + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_pe[0], sizeof(vendor_lttpr_write_data_pe)); + + /* 2. update DPCD*/ + if (!retries_ch_eq) { + /* EPR #361076 - write as a 5-byte burst, + * but only for the 1-st iteration + */ + + dpcd_set_lt_pattern_and_lane_settings( + link, + lt_settings, + tr_pattern, 0); + + link_configure_fixed_vs_pe_retimer(link->ddc, + &vendor_lttpr_write_data_adicora_eq3[0], + sizeof(vendor_lttpr_write_data_adicora_eq3)); + + } else + dpcd_set_lane_settings(link, lt_settings, 0); + + /* 3. wait for receiver to lock-on*/ + wait_time_microsec = lt_settings->eq_pattern_time; + + dp_wait_for_training_aux_rd_interval( + link, + wait_time_microsec); + + /* 4. Read lane status and requested + * drive settings as set by the sink + */ + dp_get_lane_status_and_lane_adjust( + link, + lt_settings, + dpcd_lane_status, + &dpcd_lane_status_updated, + dpcd_lane_adjust, + 0); + + /* 5. check CR done*/ + if (!dp_is_cr_done(lane_count, dpcd_lane_status)) { + status = LINK_TRAINING_EQ_FAIL_CR; + break; + } + + /* 6. check CHEQ done*/ + if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) && + dp_is_symbol_locked(lane_count, dpcd_lane_status) && + dp_is_interlane_aligned(dpcd_lane_status_updated)) { + status = LINK_TRAINING_SUCCESS; + break; + } + + /* 7. 
update VS/PE/PC2 in lt_settings*/ + dp_decide_lane_settings(lt_settings, dpcd_lane_adjust, + lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); + } + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h new file mode 100644 index 0000000000..c0d6ea3295 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.h @@ -0,0 +1,50 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ +#define __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ +#include "link_dp_training.h" + +enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +enum link_training_result dp_perform_fixed_vs_pe_training_sequence( + struct dc_link *link, + const struct link_resource *link_res, + struct link_training_settings *lt_settings); + +void dp_fixed_vs_pe_set_retimer_lane_settings( + struct dc_link *link, + const union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX], + uint8_t lane_count); + +void dp_fixed_vs_pe_read_lane_adjust( + struct dc_link *link, + union dpcd_training_lane dpcd_lane_adjust[LANE_COUNT_DP_MAX]); + +#endif /* __DC_LINK_DP_FIXED_VS_PE_RETIMER_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c new file mode 100644 index 0000000000..fc50931c2a --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c @@ -0,0 +1,249 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements basic dpcd read/write functionality. It also does basic + * dpcd range check to ensure that every dpcd request is compliant with specs + * range requirements. + */ + +#include "link_dpcd.h" +#include +#include "dm_helpers.h" + +#define END_ADDRESS(start, size) (start + size - 1) +#define ADDRESS_RANGE_SIZE(start, end) (end - start + 1) +struct dpcd_address_range { + uint32_t start; + uint32_t end; +}; + +static enum dc_status internal_link_read_dpcd( + struct dc_link *link, + uint32_t address, + uint8_t *data, + uint32_t size) +{ + if (!link->aux_access_disabled && + !dm_helpers_dp_read_dpcd(link->ctx, + link, address, data, size)) { + return DC_ERROR_UNEXPECTED; + } + + return DC_OK; +} + +static enum dc_status internal_link_write_dpcd( + struct dc_link *link, + uint32_t address, + const uint8_t *data, + uint32_t size) +{ + if (!link->aux_access_disabled && + !dm_helpers_dp_write_dpcd(link->ctx, + link, address, data, size)) { + return DC_ERROR_UNEXPECTED; + } + + return DC_OK; +} + +/* + * Partition the entire DPCD address space + * XXX: This partitioning must cover the entire DPCD address space, + * and must contain no gaps or overlapping address ranges. + */ +static const struct dpcd_address_range mandatory_dpcd_partitions[] = { + { 0, DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1) - 1}, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR1), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR2), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR3), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR4), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR5), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR6), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR7), DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8) - 1 }, + { DP_TRAINING_PATTERN_SET_PHY_REPEATER(DP_PHY_LTTPR8), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 }, + /* + * The FEC registers are contiguous + */ + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR1) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR2) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR3) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR4) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR5) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR6) - 1 }, + { DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7), DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR7) - 1 }, + { 
DP_FEC_STATUS_PHY_REPEATER(DP_PHY_LTTPR8), DP_LTTPR_MAX_ADD }, + /* all remaining DPCD addresses */ + { DP_LTTPR_MAX_ADD + 1, DP_DPCD_MAX_ADD } }; + +static inline bool do_addresses_intersect_with_range( + const struct dpcd_address_range *range, + const uint32_t start_address, + const uint32_t end_address) +{ + return start_address <= range->end && end_address >= range->start; +} + +static uint32_t dpcd_get_next_partition_size(const uint32_t address, const uint32_t size) +{ + const uint32_t end_address = END_ADDRESS(address, size); + uint32_t partition_iterator = 0; + + /* + * find current partition + * this loop spins forever if partition map above is not surjective + */ + while (!do_addresses_intersect_with_range(&mandatory_dpcd_partitions[partition_iterator], + address, end_address)) + partition_iterator++; + if (end_address < mandatory_dpcd_partitions[partition_iterator].end) + return size; + return ADDRESS_RANGE_SIZE(address, mandatory_dpcd_partitions[partition_iterator].end); +} + +/* + * Ranges of DPCD addresses that must be read in a single transaction + * XXX: Do not allow any two address ranges in this array to overlap + */ +static const struct dpcd_address_range mandatory_dpcd_blocks[] = { + { DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT }}; + +/* + * extend addresses to read all mandatory blocks together + */ +static void dpcd_extend_address_range( + const uint32_t in_address, + uint8_t * const in_data, + const uint32_t in_size, + uint32_t *out_address, + uint8_t **out_data, + uint32_t *out_size) +{ + const uint32_t end_address = END_ADDRESS(in_address, in_size); + const struct dpcd_address_range *addr_range; + struct dpcd_address_range new_addr_range; + uint32_t i; + + new_addr_range.start = in_address; + new_addr_range.end = end_address; + for (i = 0; i < ARRAY_SIZE(mandatory_dpcd_blocks); i++) { + addr_range = &mandatory_dpcd_blocks[i]; + if (addr_range->start <= in_address && addr_range->end >= in_address) + new_addr_range.start = addr_range->start; + + if (addr_range->start <= end_address && addr_range->end >= end_address) + new_addr_range.end = addr_range->end; + } + *out_address = in_address; + *out_size = in_size; + *out_data = in_data; + if (new_addr_range.start != in_address || new_addr_range.end != end_address) { + *out_address = new_addr_range.start; + *out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end); + *out_data = kzalloc(*out_size * sizeof(**out_data), GFP_KERNEL); + } +} + +/* + * Reduce the AUX reply down to the values the caller requested + */ +static void dpcd_reduce_address_range( + const uint32_t extended_address, + uint8_t * const extended_data, + const uint32_t extended_size, + const uint32_t reduced_address, + uint8_t * const reduced_data, + const uint32_t reduced_size) +{ + const uint32_t offset = reduced_address - extended_address; + + /* + * If the address is same, address was not extended. + * So we do not need to free any memory. + * The data is in original buffer(reduced_data). 
+ */ + if (extended_data == reduced_data) + return; + + memcpy(&extended_data[offset], reduced_data, reduced_size); + kfree(extended_data); +} + +enum dc_status core_link_read_dpcd( + struct dc_link *link, + uint32_t address, + uint8_t *data, + uint32_t size) +{ + uint32_t extended_address; + uint32_t partitioned_address; + uint8_t *extended_data; + uint32_t extended_size; + /* size of the remaining partitioned address space */ + uint32_t size_left_to_read; + enum dc_status status = DC_ERROR_UNEXPECTED; + /* size of the next partition to be read from */ + uint32_t partition_size; + uint32_t data_index = 0; + + dpcd_extend_address_range(address, data, size, &extended_address, &extended_data, &extended_size); + partitioned_address = extended_address; + size_left_to_read = extended_size; + while (size_left_to_read) { + partition_size = dpcd_get_next_partition_size(partitioned_address, size_left_to_read); + status = internal_link_read_dpcd(link, partitioned_address, &extended_data[data_index], partition_size); + if (status != DC_OK) + break; + partitioned_address += partition_size; + data_index += partition_size; + size_left_to_read -= partition_size; + } + dpcd_reduce_address_range(extended_address, extended_data, extended_size, address, data, size); + return status; +} + +enum dc_status core_link_write_dpcd( + struct dc_link *link, + uint32_t address, + const uint8_t *data, + uint32_t size) +{ + uint32_t partition_size; + uint32_t data_index = 0; + enum dc_status status = DC_ERROR_UNEXPECTED; + + while (size) { + partition_size = dpcd_get_next_partition_size(address, size); + status = internal_link_write_dpcd(link, address, &data[data_index], partition_size); + if (status != DC_OK) + break; + address += partition_size; + data_index += partition_size; + size -= partition_size; + } + return status; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h new file mode 100644 index 0000000000..08d787a1e4 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.h @@ -0,0 +1,42 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __LINK_DPCD_H__ +#define __LINK_DPCD_H__ +#include "link.h" +#include "dpcd_defs.h" + +enum dc_status core_link_read_dpcd( + struct dc_link *link, + uint32_t address, + uint8_t *data, + uint32_t size); + +enum dc_status core_link_write_dpcd( + struct dc_link *link, + uint32_t address, + const uint8_t *data, + uint32_t size); +#endif diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c new file mode 100644 index 0000000000..6f64aab18f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -0,0 +1,1079 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * This file implements retrieval and configuration of eDP panel features such + * as PSR and ABM and it also manages specs defined eDP panel power sequences. 
+ */ + +#include "link_edp_panel_control.h" +#include "link_dpcd.h" +#include "link_dp_capability.h" +#include "dm_helpers.h" +#include "dal_asic_id.h" +#include "link_dp_phy.h" +#include "dce/dmub_psr.h" +#include "dc/dc_dmub_srv.h" +#include "dce/dmub_replay.h" +#include "abm.h" +#define DC_LOGGER_INIT(logger) + +#define DP_SINK_PR_ENABLE_AND_CONFIGURATION 0x37B + +/* Travis */ +static const uint8_t DP_VGA_LVDS_CONVERTER_ID_2[] = "sivarT"; +/* Nutmeg */ +static const uint8_t DP_VGA_LVDS_CONVERTER_ID_3[] = "dnomlA"; + +void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode) +{ + union dpcd_edp_config edp_config_set; + bool panel_mode_edp = false; + enum dc_status result; + + memset(&edp_config_set, '\0', sizeof(union dpcd_edp_config)); + + switch (panel_mode) { + case DP_PANEL_MODE_EDP: + case DP_PANEL_MODE_SPECIAL: + panel_mode_edp = true; + break; + + default: + break; + } + + /*set edp panel mode in receiver*/ + result = core_link_read_dpcd( + link, + DP_EDP_CONFIGURATION_SET, + &edp_config_set.raw, + sizeof(edp_config_set.raw)); + + if (result == DC_OK && + edp_config_set.bits.PANEL_MODE_EDP + != panel_mode_edp) { + + edp_config_set.bits.PANEL_MODE_EDP = + panel_mode_edp; + result = core_link_write_dpcd( + link, + DP_EDP_CONFIGURATION_SET, + &edp_config_set.raw, + sizeof(edp_config_set.raw)); + + ASSERT(result == DC_OK); + } + + link->panel_mode = panel_mode; + DC_LOG_DETECTION_DP_CAPS("Link: %d eDP panel mode supported: %d " + "eDP panel mode enabled: %d \n", + link->link_index, + link->dpcd_caps.panel_mode_edp, + panel_mode_edp); +} + +enum dp_panel_mode dp_get_panel_mode(struct dc_link *link) +{ + /* We need to explicitly check that connector + * is not DP. Some Travis_VGA get reported + * by video bios as DP. + */ + if (link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT) { + + switch (link->dpcd_caps.branch_dev_id) { + case DP_BRANCH_DEVICE_ID_0022B9: + /* alternate scrambler reset is required for Travis + * for the case when external chip does not + * provide sink device id, alternate scrambler + * scheme will be overriden later by querying + * Encoder features + */ + if (strncmp( + link->dpcd_caps.branch_dev_name, + DP_VGA_LVDS_CONVERTER_ID_2, + sizeof( + link->dpcd_caps. + branch_dev_name)) == 0) { + return DP_PANEL_MODE_SPECIAL; + } + break; + case DP_BRANCH_DEVICE_ID_00001A: + /* alternate scrambler reset is required for Travis + * for the case when external chip does not provide + * sink device id, alternate scrambler scheme will + * be overriden later by querying Encoder feature + */ + if (strncmp(link->dpcd_caps.branch_dev_name, + DP_VGA_LVDS_CONVERTER_ID_3, + sizeof( + link->dpcd_caps. + branch_dev_name)) == 0) { + return DP_PANEL_MODE_SPECIAL; + } + break; + default: + break; + } + } + + if (link->dpcd_caps.panel_mode_edp && + (link->connector_signal == SIGNAL_TYPE_EDP || + (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && + link->is_internal_display))) { + return DP_PANEL_MODE_EDP; + } + + return DP_PANEL_MODE_DEFAULT; +} + +bool edp_set_backlight_level_nits(struct dc_link *link, + bool isHDR, + uint32_t backlight_millinits, + uint32_t transition_time_in_ms) +{ + struct dpcd_source_backlight_set dpcd_backlight_set; + uint8_t backlight_control = isHDR ? 
1 : 0; + + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + // OLEDs have no PWM, they can only use AUX + if (link->dpcd_sink_ext_caps.bits.oled == 1) + backlight_control = 1; + + *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits; + *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; + + + if (!link->dpcd_caps.panel_luminance_control) { + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)(&dpcd_backlight_set), + sizeof(dpcd_backlight_set)) != DC_OK) + return false; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_control, 1) != DC_OK) + return false; + } else { + const uint8_t backlight_enable = DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + struct target_luminance_value *target_luminance = NULL; + + //if target luminance value is greater than 24 bits, clip the value to 24 bits + if (backlight_millinits > 0xFFFFFF) + backlight_millinits = 0xFFFFFF; + + target_luminance = (struct target_luminance_value *)&backlight_millinits; + + if (core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, + sizeof(backlight_enable)) != DC_OK) + return false; + + if (core_link_write_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE, + (uint8_t *)(target_luminance), + sizeof(struct target_luminance_value)) != DC_OK) + return false; + } + + return true; +} + +bool edp_get_backlight_level_nits(struct dc_link *link, + uint32_t *backlight_millinits_avg, + uint32_t *backlight_millinits_peak) +{ + union dpcd_source_backlight_get dpcd_backlight_get; + + memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get)); + + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, + dpcd_backlight_get.raw, + sizeof(union dpcd_source_backlight_get))) + return false; + + *backlight_millinits_avg = + dpcd_backlight_get.bytes.backlight_millinits_avg; + *backlight_millinits_peak = + dpcd_backlight_get.bytes.backlight_millinits_peak; + + /* On non-supported panels dpcd_read usually succeeds with 0 returned */ + if (*backlight_millinits_avg == 0 || + *backlight_millinits_avg > *backlight_millinits_peak) + return false; + + return true; +} + +bool edp_backlight_enable_aux(struct dc_link *link, bool enable) +{ + uint8_t backlight_enable = enable ? 
1 : 0; + + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE, + &backlight_enable, 1) != DC_OK) + return false; + + return true; +} + +// we read default from 0x320 because we expect BIOS wrote it there +// regular get_backlight_nit reads from panel set at 0x326 +static bool read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits) +{ + if (!link || (link->connector_signal != SIGNAL_TYPE_EDP && + link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) + return false; + + if (!link->dpcd_caps.panel_luminance_control) { + if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, + (uint8_t *)backlight_millinits, + sizeof(uint32_t))) + return false; + } else { + //setting to 0 as a precaution, since target_luminance_value is 3 bytes + memset(backlight_millinits, 0, sizeof(uint32_t)); + + if (!core_link_read_dpcd(link, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE, + (uint8_t *)backlight_millinits, + sizeof(struct target_luminance_value))) + return false; + } + + return true; +} + +bool set_default_brightness_aux(struct dc_link *link) +{ + uint32_t default_backlight; + + if (link && link->dpcd_sink_ext_caps.bits.oled == 1) { + if (!read_default_bl_aux(link, &default_backlight)) + default_backlight = 150000; + // if < 1 nits or > 5000, it might be wrong readback + if (default_backlight < 1000 || default_backlight > 5000000) + default_backlight = 150000; + + return edp_set_backlight_level_nits(link, true, + default_backlight, 0); + } + return false; +} + +bool edp_is_ilr_optimization_enabled(struct dc_link *link) +{ + if (link->dpcd_caps.edp_supported_link_rates_count == 0 || !link->panel_config.ilr.optimize_edp_link_rate) + return false; + return true; +} + +enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link) +{ + enum dc_link_rate link_rate = link->reported_link_cap.link_rate; + + for (int i = 0; i < link->dpcd_caps.edp_supported_link_rates_count; i++) { + if (link_rate < link->dpcd_caps.edp_supported_link_rates[i]) + link_rate = link->dpcd_caps.edp_supported_link_rates[i]; + } + + return link_rate; +} + +bool edp_is_ilr_optimization_required(struct dc_link *link, + struct dc_crtc_timing *crtc_timing) +{ + struct dc_link_settings link_setting; + uint8_t link_bw_set; + uint8_t link_rate_set; + uint32_t req_bw; + union lane_count_set lane_count_set = {0}; + + ASSERT(link || crtc_timing); // invalid input + + if (!edp_is_ilr_optimization_enabled(link)) + return false; + + + // Read DPCD 00100h to find if standard link rates are set + core_link_read_dpcd(link, DP_LINK_BW_SET, + &link_bw_set, sizeof(link_bw_set)); + + if (link_bw_set) { + DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS used link_bw_set\n"); + return true; + } + + // Read DPCD 00115h to find the edp link rate set used + core_link_read_dpcd(link, DP_LINK_RATE_SET, + &link_rate_set, sizeof(link_rate_set)); + + // Read DPCD 00101h to find out the number of lanes currently set + core_link_read_dpcd(link, DP_LANE_COUNT_SET, + &lane_count_set.raw, sizeof(lane_count_set)); + + req_bw = dc_bandwidth_in_kbps_from_timing(crtc_timing, dc_link_get_highest_encoding_format(link)); + + if (!crtc_timing->flags.DSC) + edp_decide_link_settings(link, &link_setting, req_bw); + else + decide_edp_link_settings_with_dsc(link, &link_setting, req_bw, LINK_RATE_UNKNOWN); + + if (link->dpcd_caps.edp_supported_link_rates[link_rate_set] != link_setting.link_rate || + 
lane_count_set.bits.LANE_COUNT_SET != link_setting.lane_count) { + DC_LOG_EVENT_LINK_TRAINING("eDP ILR: Optimization required, VBIOS link_rate_set not optimal\n"); + return true; + } + + DC_LOG_EVENT_LINK_TRAINING("eDP ILR: No optimization required, VBIOS set optimal link_rate_set\n"); + return false; +} + +void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd) +{ + if (link->connector_signal != SIGNAL_TYPE_EDP) + return; + + link->dc->hwss.edp_power_control(link, true); + if (wait_for_hpd) + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + if (link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control(link, true); +} + +void edp_set_panel_power(struct dc_link *link, bool powerOn) +{ + if (powerOn) { + // 1. panel VDD on + if (!link->dc->config.edp_no_power_sequencing) + link->dc->hwss.edp_power_control(link, true); + link->dc->hwss.edp_wait_for_hpd_ready(link, true); + + // 2. panel BL on + if (link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control(link, true); + + // 3. Rx power on + dpcd_write_rx_power_ctrl(link, true); + } else { + // 3. Rx power off + dpcd_write_rx_power_ctrl(link, false); + + // 2. panel BL off + if (link->dc->hwss.edp_backlight_control) + link->dc->hwss.edp_backlight_control(link, false); + + // 1. panel VDD off + if (!link->dc->config.edp_no_power_sequencing) + link->dc->hwss.edp_power_control(link, false); + } +} + +bool edp_wait_for_t12(struct dc_link *link) +{ + if (link->connector_signal == SIGNAL_TYPE_EDP && link->dc->hwss.edp_wait_for_T12) { + link->dc->hwss.edp_wait_for_T12(link); + + return true; + } + + return false; +} + +void edp_add_delay_for_T9(struct dc_link *link) +{ + if (link && link->panel_config.pps.extra_delay_backlight_off > 0) + fsleep(link->panel_config.pps.extra_delay_backlight_off * 1000); +} + +bool edp_receiver_ready_T9(struct dc_link *link) +{ + unsigned int tries = 0; + unsigned char sinkstatus = 0; + unsigned char edpRev = 0; + enum dc_status result = DC_OK; + + result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); + + /* start from eDP version 1.2, SINK_STATUS indicates the sink is ready.*/ + if (result == DC_OK && edpRev >= DP_EDP_12) { + do { + sinkstatus = 1; + result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); + if (sinkstatus == 0) + break; + if (result != DC_OK) + break; + udelay(100); //Max T9 + } while (++tries < 50); + } + + return result; +} + +bool edp_receiver_ready_T7(struct dc_link *link) +{ + unsigned char sinkstatus = 0; + unsigned char edpRev = 0; + enum dc_status result = DC_OK; + + /* use absolute time stamp to constrain max T7*/ + unsigned long long enter_timestamp = 0; + unsigned long long finish_timestamp = 0; + unsigned long long time_taken_in_ns = 0; + + result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev)); + + if (result == DC_OK && edpRev >= DP_EDP_12) { + /* start from eDP version 1.2, SINK_STATUS indicates the sink is ready.*/ + enter_timestamp = dm_get_timestamp(link->ctx); + do { + sinkstatus = 0; + result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus)); + if (sinkstatus == 1) + break; + if (result != DC_OK) + break; + udelay(25); + finish_timestamp = dm_get_timestamp(link->ctx); + time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp); + } while (time_taken_in_ns < 50 * 1000000); //Max T7 is 50ms + } + + if (link && link->panel_config.pps.extra_t7_ms > 0) + fsleep(link->panel_config.pps.extra_t7_ms *
1000); + + return result; +} + +bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable) +{ + bool ret = false; + union dpcd_alpm_configuration alpm_config; + + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + memset(&alpm_config, 0, sizeof(alpm_config)); + + alpm_config.bits.ENABLE = (enable ? true : false); + ret = dm_helpers_dp_write_dpcd(link->ctx, link, + DP_RECEIVER_ALPM_CONFIG, &alpm_config.raw, + sizeof(alpm_config.raw)); + } + return ret; +} + +static struct pipe_ctx *get_pipe_from_link(const struct dc_link *link) +{ + int i; + struct dc *dc = link->ctx->dc; + struct pipe_ctx *pipe_ctx = NULL; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream->link == link) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + break; + } + } + } + + return pipe_ctx; +} + +bool edp_set_backlight_level(const struct dc_link *link, + uint32_t backlight_pwm_u16_16, + uint32_t frame_ramp) +{ + struct dc *dc = link->ctx->dc; + + DC_LOGGER_INIT(link->ctx->logger); + DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n", + backlight_pwm_u16_16, backlight_pwm_u16_16); + + if (dc_is_embedded_signal(link->connector_signal)) { + struct pipe_ctx *pipe_ctx = get_pipe_from_link(link); + + if (pipe_ctx) { + /* Disable brightness ramping when the display is blanked + * as it can hang the DMCU + */ + if (pipe_ctx->plane_state == NULL) + frame_ramp = 0; + } else { + return false; + } + + dc->hwss.set_backlight_level( + pipe_ctx, + backlight_pwm_u16_16, + frame_ramp); + } + return true; +} + +bool edp_set_psr_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts) +{ + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + struct dmub_psr *psr = dc->res_pool->psr; + unsigned int panel_inst; + + if (psr == NULL && force_static) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if ((allow_active != NULL) && (*allow_active == true) && (link->type == dc_connection_none)) { + // Don't enter PSR if panel is not connected + return false; + } + + /* Set power optimization flag */ + if (power_opts && link->psr_settings.psr_power_opt != *power_opts) { + link->psr_settings.psr_power_opt = *power_opts; + + if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt) + psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt, panel_inst); + } + + if (psr != NULL && link->psr_settings.psr_feature_enabled && + force_static && psr->funcs->psr_force_static) + psr->funcs->psr_force_static(psr, panel_inst); + + /* Enable or Disable PSR */ + if (allow_active && link->psr_settings.psr_allow_active != *allow_active) { + link->psr_settings.psr_allow_active = *allow_active; + + if (!link->psr_settings.psr_allow_active) + dc_z10_restore(dc); + + if (psr != NULL && link->psr_settings.psr_feature_enabled) { + psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst); + } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && + link->psr_settings.psr_feature_enabled) + dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait); + else + return false; + } + return true; +} + +bool edp_get_psr_state(const struct dc_link *link, enum dc_psr_state *state) +{ + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + struct dmub_psr *psr = dc->res_pool->psr; + unsigned int panel_inst; 
+ + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (psr != NULL && link->psr_settings.psr_feature_enabled) + psr->funcs->psr_get_state(psr, state, panel_inst); + else if (dmcu != NULL && link->psr_settings.psr_feature_enabled) + dmcu->funcs->get_psr_state(dmcu, state); + + return true; +} + +static inline enum physical_phy_id +transmitter_to_phy_id(struct dc_link *link) +{ + struct dc_context *dc_ctx = link->ctx; + enum transmitter transmitter_value = link->link_enc->transmitter; + + switch (transmitter_value) { + case TRANSMITTER_UNIPHY_A: + return PHYLD_0; + case TRANSMITTER_UNIPHY_B: + return PHYLD_1; + case TRANSMITTER_UNIPHY_C: + return PHYLD_2; + case TRANSMITTER_UNIPHY_D: + return PHYLD_3; + case TRANSMITTER_UNIPHY_E: + return PHYLD_4; + case TRANSMITTER_UNIPHY_F: + return PHYLD_5; + case TRANSMITTER_NUTMEG_CRT: + return PHYLD_6; + case TRANSMITTER_TRAVIS_CRT: + return PHYLD_7; + case TRANSMITTER_TRAVIS_LCD: + return PHYLD_8; + case TRANSMITTER_UNIPHY_G: + return PHYLD_9; + case TRANSMITTER_COUNT: + return PHYLD_COUNT; + case TRANSMITTER_UNKNOWN: + return PHYLD_UNKNOWN; + default: + DC_ERROR("Unknown transmitter value %d\n", transmitter_value); + return PHYLD_UNKNOWN; + } +} + +bool edp_setup_psr(struct dc_link *link, + const struct dc_stream_state *stream, struct psr_config *psr_config, + struct psr_context *psr_context) +{ + struct dc *dc; + struct dmcu *dmcu; + struct dmub_psr *psr; + int i; + unsigned int panel_inst; + /* updateSinkPsrDpcdConfig*/ + union dpcd_psr_configuration psr_configuration; + union dpcd_sink_active_vtotal_control_mode vtotal_control = {0}; + + psr_context->controllerId = CONTROLLER_ID_UNDEFINED; + + if (!link) + return false; + + dc = link->ctx->dc; + dmcu = dc->res_pool->dmcu; + psr = dc->res_pool->psr; + + if (!dmcu && !psr) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + + memset(&psr_configuration, 0, sizeof(psr_configuration)); + + psr_configuration.bits.ENABLE = 1; + psr_configuration.bits.CRC_VERIFICATION = 1; + psr_configuration.bits.FRAME_CAPTURE_INDICATION = + psr_config->psr_frame_capture_indication_req; + + /* Check for PSR v2*/ + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + /* For PSR v2 selective update. + * Indicates whether sink should start capturing + * immediately following active scan line, + * or starting with the 2nd active scan line. + */ + psr_configuration.bits.LINE_CAPTURE_INDICATION = 0; + /*For PSR v2, determines whether Sink should generate + * IRQ_HPD when CRC mismatch is detected. + */ + psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1; + /* For PSR v2, set the bit when the Source device will + * be enabling PSR2 operation. + */ + psr_configuration.bits.ENABLE_PSR2 = 1; + /* For PSR v2, the Sink device must be able to receive + * SU region updates early in the frame time. 
+ */ + psr_configuration.bits.EARLY_TRANSPORT_ENABLE = 1; + } + + dm_helpers_dp_write_dpcd( + link->ctx, + link, + 368, + &psr_configuration.raw, + sizeof(psr_configuration.raw)); + + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + edp_power_alpm_dpcd_enable(link, true); + psr_context->su_granularity_required = + psr_config->su_granularity_required; + psr_context->su_y_granularity = + psr_config->su_y_granularity; + psr_context->line_time_in_us = psr_config->line_time_in_us; + + /* linux must be able to expose AMD Source DPCD definition + * in order to support FreeSync PSR + */ + if (link->psr_settings.psr_vtotal_control_support) { + psr_context->rate_control_caps = psr_config->rate_control_caps; + vtotal_control.bits.ENABLE = true; + core_link_write_dpcd(link, DP_SINK_PSR_ACTIVE_VTOTAL_CONTROL_MODE, + &vtotal_control.raw, sizeof(vtotal_control.raw)); + } + } + + psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel; + psr_context->transmitterId = link->link_enc->transmitter; + psr_context->engineId = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + psr_context->controllerId = + dc->current_state->res_ctx. + pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/ + psr_context->phyType = PHY_TYPE_UNIPHY; + /*PhyId is associated with the transmitter id*/ + psr_context->smuPhyId = transmitter_to_phy_id(link); + + psr_context->crtcTimingVerticalTotal = stream->timing.v_total; + psr_context->vsync_rate_hz = div64_u64(div64_u64((stream-> + timing.pix_clk_100hz * 100), + stream->timing.v_total), + stream->timing.h_total); + + psr_context->psrSupportedDisplayConfig = true; + psr_context->psrExitLinkTrainingRequired = + psr_config->psr_exit_link_training_required; + psr_context->sdpTransmitLineNumDeadline = + psr_config->psr_sdp_transmit_line_num_deadline; + psr_context->psrFrameCaptureIndicationReq = + psr_config->psr_frame_capture_indication_req; + + psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */ + + psr_context->numberOfControllers = + link->dc->res_pool->timing_generator_count; + + psr_context->rfb_update_auto_en = true; + + /* 2 frames before enter PSR. */ + psr_context->timehyst_frames = 2; + /* half a frame + * (units in 100 lines, i.e. a value of 1 represents 100 lines) + */ + psr_context->hyst_lines = stream->timing.v_total / 2 / 100; + psr_context->aux_repeats = 10; + + psr_context->psr_level.u32all = 0; + + /*skip power down the single pipe since it blocks the cstate*/ + if (link->ctx->asic_id.chip_family >= FAMILY_RV) { + switch (link->ctx->asic_id.chip_family) { + case FAMILY_YELLOW_CARP: + case AMDGPU_FAMILY_GC_10_3_6: + case AMDGPU_FAMILY_GC_11_0_1: + if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable) + psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; + break; + default: + psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; + break; + } + } + + /* SMU will perform additional powerdown sequence. + * For unsupported ASICs, set psr_level flag to skip PSR + * static screen notification to SMU. 
+ * (Always set for DAL2, did not check ASIC) + */ + psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations; + psr_context->allow_multi_disp_optimizations = psr_config->allow_multi_disp_optimizations; + + /* Complete PSR entry before aborting to prevent intermittent + * freezes on certain eDPs + */ + psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1; + + /* Disable ALPM first for compatible non-ALPM panel now */ + psr_context->psr_level.bits.DISABLE_ALPM = 0; + psr_context->psr_level.bits.ALPM_DEFAULT_PD_MODE = 1; + + /* Controls additional delay after remote frame capture before + * continuing power down, default = 0 + */ + psr_context->frame_delay = 0; + + psr_context->dsc_slice_height = psr_config->dsc_slice_height; + + if (psr) { + link->psr_settings.psr_feature_enabled = psr->funcs->psr_copy_settings(psr, + link, psr_context, panel_inst); + link->psr_settings.psr_power_opt = 0; + link->psr_settings.psr_allow_active = 0; + } else { + link->psr_settings.psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); + } + + /* psr_enabled == 0 indicates setup_psr did not succeed, but this + * should not happen since firmware should be running at this point + */ + if (link->psr_settings.psr_feature_enabled == 0) + ASSERT(0); + + return true; + +} + +void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency) +{ + struct dc *dc = link->ctx->dc; + struct dmub_psr *psr = dc->res_pool->psr; + unsigned int panel_inst; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return; + + // PSR residency measurements only supported on DMCUB + if (psr != NULL && link->psr_settings.psr_feature_enabled) + psr->funcs->psr_get_residency(psr, residency, panel_inst); + else + *residency = 0; +} +bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su) +{ + struct dc *dc = link->ctx->dc; + struct dmub_psr *psr = dc->res_pool->psr; + + if (psr == NULL || !link->psr_settings.psr_feature_enabled || !link->psr_settings.psr_vtotal_control_support) + return false; + + psr->funcs->psr_set_sink_vtotal_in_psr_active(psr, psr_vtotal_idle, psr_vtotal_su); + + return true; +} + +bool edp_set_replay_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (replay == NULL && force_static) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + /* Set power optimization flag */ + if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts) { + if (link->replay_settings.replay_feature_enabled && replay->funcs->replay_set_power_opt) { + replay->funcs->replay_set_power_opt(replay, *power_opts, panel_inst); + link->replay_settings.replay_power_opt_active = *power_opts; + } + } + + /* Activate or deactivate Replay */ + if (allow_active && link->replay_settings.replay_allow_active != *allow_active) { + // TODO: Handle mux change case if force_static is set + // If force_static is set, just change the replay_allow_active state directly + if (replay != NULL && link->replay_settings.replay_feature_enabled) + replay->funcs->replay_enable(replay, *allow_active, wait, panel_inst); + link->replay_settings.replay_allow_active = *allow_active; + } + + return true; +} + +bool edp_get_replay_state(const struct dc_link *link, uint64_t *state) +{ + struct dc *dc = link->ctx->dc; 
+ struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + enum replay_state pr_state = REPLAY_STATE_0; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (replay != NULL && link->replay_settings.replay_feature_enabled) + replay->funcs->replay_get_state(replay, &pr_state, panel_inst); + *state = pr_state; + + return true; +} + +bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream) +{ + /* To-do: Setup Replay */ + struct dc *dc; + struct dmub_replay *replay; + int i; + unsigned int panel_inst; + struct replay_context replay_context = { 0 }; + unsigned int lineTimeInNs = 0; + + + union replay_enable_and_configuration replay_config; + + union dpcd_alpm_configuration alpm_config; + + replay_context.controllerId = CONTROLLER_ID_UNDEFINED; + + if (!link) + return false; + + dc = link->ctx->dc; + + replay = dc->res_pool->replay; + + if (!replay) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + replay_context.aux_inst = link->ddc->ddc_pin->hw_info.ddc_channel; + replay_context.digbe_inst = link->link_enc->transmitter; + replay_context.digfe_inst = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + replay_context.controllerId = + dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + lineTimeInNs = + ((stream->timing.h_total * 1000000) / + (stream->timing.pix_clk_100hz / 10)) + 1; + + replay_context.line_time_in_ns = lineTimeInNs; + + link->replay_settings.replay_feature_enabled = + replay->funcs->replay_copy_settings(replay, link, &replay_context, panel_inst); + if (link->replay_settings.replay_feature_enabled) { + + replay_config.bits.FREESYNC_PANEL_REPLAY_MODE = 1; + replay_config.bits.TIMING_DESYNC_ERROR_VERIFICATION = + link->replay_settings.config.replay_timing_sync_supported; + replay_config.bits.STATE_TRANSITION_ERROR_DETECTION = 1; + dm_helpers_dp_write_dpcd(link->ctx, link, + DP_SINK_PR_ENABLE_AND_CONFIGURATION, + (uint8_t *)&(replay_config.raw), sizeof(uint8_t)); + + memset(&alpm_config, 0, sizeof(alpm_config)); + alpm_config.bits.ENABLE = 1; + dm_helpers_dp_write_dpcd( + link->ctx, + link, + DP_RECEIVER_ALPM_CONFIG, + &alpm_config.raw, + sizeof(alpm_config.raw)); + } + return true; +} + +bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!replay) + return false; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) { + replay->funcs->replay_set_coasting_vtotal(replay, coasting_vtotal, panel_inst); + link->replay_settings.coasting_vtotal = coasting_vtotal; + } + + return true; +} + +bool edp_replay_residency(const struct dc_link *link, + unsigned int *residency, const bool is_start, const bool is_alpm) +{ + struct dc *dc = link->ctx->dc; + struct dmub_replay *replay = dc->res_pool->replay; + unsigned int panel_inst; + + if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst)) + return false; + + if (replay != NULL && link->replay_settings.replay_feature_enabled) + replay->funcs->replay_residency(replay, panel_inst, residency, is_start, is_alpm); + else + *residency = 0; + + return true; +} + +static struct abm 
*get_abm_from_stream_res(const struct dc_link *link) +{ + int i; + struct dc *dc = link->ctx->dc; + struct abm *abm = NULL; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx pipe_ctx = dc->current_state->res_ctx.pipe_ctx[i]; + struct dc_stream_state *stream = pipe_ctx.stream; + + if (stream && stream->link == link) { + abm = pipe_ctx.stream_res.abm; + break; + } + } + return abm; +} + +int edp_get_backlight_level(const struct dc_link *link) +{ + struct abm *abm = get_abm_from_stream_res(link); + struct panel_cntl *panel_cntl = link->panel_cntl; + struct dc *dc = link->ctx->dc; + struct dmcu *dmcu = dc->res_pool->dmcu; + bool fw_set_brightness = true; + + if (dmcu) + fw_set_brightness = dmcu->funcs->is_dmcu_initialized(dmcu); + + if (!fw_set_brightness && panel_cntl->funcs->get_current_backlight) + return panel_cntl->funcs->get_current_backlight(panel_cntl); + else if (abm != NULL && abm->funcs->get_current_backlight != NULL) + return (int) abm->funcs->get_current_backlight(abm); + else + return DC_ERROR_UNEXPECTED; +} + +int edp_get_target_backlight_pwm(const struct dc_link *link) +{ + struct abm *abm = get_abm_from_stream_res(link); + + if (abm == NULL || abm->funcs->get_target_backlight == NULL) + return DC_ERROR_UNEXPECTED; + + return (int) abm->funcs->get_target_backlight(abm); +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h new file mode 100644 index 0000000000..a034288ad7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -0,0 +1,74 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_EDP_PANEL_CONTROL_H__ +#define __DC_LINK_EDP_PANEL_CONTROL_H__ +#include "link.h" + +enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); +void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); +bool set_default_brightness_aux(struct dc_link *link); +void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); +int edp_get_backlight_level(const struct dc_link *link); +bool edp_get_backlight_level_nits(struct dc_link *link, + uint32_t *backlight_millinits_avg, + uint32_t *backlight_millinits_peak); +bool edp_set_backlight_level(const struct dc_link *link, + uint32_t backlight_pwm_u16_16, + uint32_t frame_ramp); +bool edp_set_backlight_level_nits(struct dc_link *link, + bool isHDR, + uint32_t backlight_millinits, + uint32_t transition_time_in_ms); +int edp_get_target_backlight_pwm(const struct dc_link *link); +bool edp_get_psr_state(const struct dc_link *link, enum dc_psr_state *state); +bool edp_set_psr_allow_active(struct dc_link *link, const bool *allow_active, + bool wait, bool force_static, const unsigned int *power_opts); +bool edp_setup_psr(struct dc_link *link, + const struct dc_stream_state *stream, struct psr_config *psr_config, + struct psr_context *psr_context); +bool edp_set_sink_vtotal_in_psr_active(const struct dc_link *link, + uint16_t psr_vtotal_idle, uint16_t psr_vtotal_su); +void edp_get_psr_residency(const struct dc_link *link, uint32_t *residency); +bool edp_set_replay_allow_active(struct dc_link *dc_link, const bool *enable, + bool wait, bool force_static, const unsigned int *power_opts); +bool edp_setup_replay(struct dc_link *link, + const struct dc_stream_state *stream); +bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal); +bool edp_replay_residency(const struct dc_link *link, + unsigned int *residency, const bool is_start, const bool is_alpm); +bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); +bool edp_wait_for_t12(struct dc_link *link); +bool edp_is_ilr_optimization_required(struct dc_link *link, + struct dc_crtc_timing *crtc_timing); +bool edp_is_ilr_optimization_enabled(struct dc_link *link); +enum dc_link_rate get_max_link_rate_from_ilr_table(struct dc_link *link); +bool edp_backlight_enable_aux(struct dc_link *link, bool enable); +void edp_add_delay_for_T9(struct dc_link *link); +bool edp_receiver_ready_T9(struct dc_link *link); +bool edp_receiver_ready_T7(struct dc_link *link); +bool edp_power_alpm_dpcd_enable(struct dc_link *link, bool enable); +void edp_set_panel_power(struct dc_link *link, bool powerOn); +#endif /* __DC_LINK_EDP_PANEL_CONTROL_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c new file mode 100644 index 0000000000..e3d729ab5b --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.c @@ -0,0 +1,240 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +/* FILE POLICY AND INTENDED USAGE: + * + * This file implements functions that manage basic HPD components such as gpio. + * It also provides wrapper functions to execute HPD related programming. This + * file only manages basic HPD functionality. It doesn't manage detection or + * feature or signal specific HPD behaviors. + */ +#include "link_hpd.h" +#include "gpio_service_interface.h" + +bool link_get_hpd_state(struct dc_link *link) +{ + uint32_t state; + + dal_gpio_lock_pin(link->hpd_gpio); + dal_gpio_get_value(link->hpd_gpio, &state); + dal_gpio_unlock_pin(link->hpd_gpio); + + return state; +} + +void link_enable_hpd(const struct dc_link *link) +{ + struct link_encoder *encoder = link->link_enc; + + if (encoder != NULL && encoder->funcs->enable_hpd != NULL) + encoder->funcs->enable_hpd(encoder); +} + +void link_disable_hpd(const struct dc_link *link) +{ + struct link_encoder *encoder = link->link_enc; + + if (encoder != NULL && encoder->funcs->disable_hpd != NULL) + encoder->funcs->disable_hpd(encoder); +} + +void link_enable_hpd_filter(struct dc_link *link, bool enable) +{ + struct gpio *hpd; + + if (enable) { + link->is_hpd_filter_disabled = false; + program_hpd_filter(link); + } else { + link->is_hpd_filter_disabled = true; + /* Obtain HPD handle */ + hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service); + + if (!hpd) + return; + + /* Setup HPD filtering */ + if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { + struct gpio_hpd_config config; + + config.delay_on_connect = 0; + config.delay_on_disconnect = 0; + + dal_irq_setup_hpd_filter(hpd, &config); + + dal_gpio_close(hpd); + } else { + ASSERT_CRITICAL(false); + } + /* Release HPD handle */ + dal_gpio_destroy_irq(&hpd); + } +} + +struct gpio *link_get_hpd_gpio(struct dc_bios *dcb, + struct graphics_object_id link_id, + struct gpio_service *gpio_service) +{ + enum bp_result bp_result; + struct graphics_object_hpd_info hpd_info; + struct gpio_pin_info pin_info; + + if (dcb->funcs->get_hpd_info(dcb, link_id, &hpd_info) != BP_RESULT_OK) + return NULL; + + bp_result = dcb->funcs->get_gpio_pin_info(dcb, + hpd_info.hpd_int_gpio_uid, &pin_info); + + if (bp_result != BP_RESULT_OK) { + ASSERT(bp_result == BP_RESULT_NORECORD); + return NULL; + } + + return dal_gpio_service_create_irq(gpio_service, + pin_info.offset, + pin_info.mask); + } + +bool query_hpd_status(struct dc_link *link, uint32_t
*is_hpd_high) +{ + struct gpio *hpd_pin = link_get_hpd_gpio( + link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + if (!hpd_pin) + return false; + + dal_gpio_open(hpd_pin, GPIO_MODE_INTERRUPT); + dal_gpio_get_value(hpd_pin, is_hpd_high); + dal_gpio_close(hpd_pin); + dal_gpio_destroy_irq(&hpd_pin); + return true; +} + +enum hpd_source_id get_hpd_line(struct dc_link *link) +{ + struct gpio *hpd; + enum hpd_source_id hpd_id; + + hpd_id = HPD_SOURCEID_UNKNOWN; + + hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + + if (hpd) { + switch (dal_irq_get_source(hpd)) { + case DC_IRQ_SOURCE_HPD1: + hpd_id = HPD_SOURCEID1; + break; + case DC_IRQ_SOURCE_HPD2: + hpd_id = HPD_SOURCEID2; + break; + case DC_IRQ_SOURCE_HPD3: + hpd_id = HPD_SOURCEID3; + break; + case DC_IRQ_SOURCE_HPD4: + hpd_id = HPD_SOURCEID4; + break; + case DC_IRQ_SOURCE_HPD5: + hpd_id = HPD_SOURCEID5; + break; + case DC_IRQ_SOURCE_HPD6: + hpd_id = HPD_SOURCEID6; + break; + default: + BREAK_TO_DEBUGGER(); + break; + } + + dal_gpio_destroy_irq(&hpd); + } + + return hpd_id; +} + +bool program_hpd_filter(const struct dc_link *link) +{ + bool result = false; + struct gpio *hpd; + int delay_on_connect_in_ms = 0; + int delay_on_disconnect_in_ms = 0; + + if (link->is_hpd_filter_disabled) + return false; + /* Verify feature is supported */ + switch (link->connector_signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + /* Program hpd filter */ + delay_on_connect_in_ms = 500; + delay_on_disconnect_in_ms = 100; + break; + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + /* Program hpd filter to allow DP signal to settle */ + /* 500: not able to detect MST <-> SST switch as HPD is low for + * only 100ms on DELL U2413 + * 0: some passive dongle still show aux mode instead of i2c + * 20-50: not enough to hide bouncing HPD with passive dongle. + * also see intermittent i2c read issues. + */ + delay_on_connect_in_ms = 80; + delay_on_disconnect_in_ms = 0; + break; + case SIGNAL_TYPE_LVDS: + case SIGNAL_TYPE_EDP: + default: + /* Don't program hpd filter */ + return false; + } + + /* Obtain HPD handle */ + hpd = link_get_hpd_gpio(link->ctx->dc_bios, link->link_id, + link->ctx->gpio_service); + + if (!hpd) + return result; + + /* Setup HPD filtering */ + if (dal_gpio_open(hpd, GPIO_MODE_INTERRUPT) == GPIO_RESULT_OK) { + struct gpio_hpd_config config; + + config.delay_on_connect = delay_on_connect_in_ms; + config.delay_on_disconnect = delay_on_disconnect_in_ms; + + dal_irq_setup_hpd_filter(hpd, &config); + + dal_gpio_close(hpd); + + result = true; + } else { + ASSERT_CRITICAL(false); + } + + /* Release HPD handle */ + dal_gpio_destroy_irq(&hpd); + + return result; +} diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h new file mode 100644 index 0000000000..4fb526b264 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_hpd.h @@ -0,0 +1,54 @@ +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#ifndef __DC_LINK_HPD_H__ +#define __DC_LINK_HPD_H__ +#include "link.h" + +enum hpd_source_id get_hpd_line(struct dc_link *link); +/* + * Function: program_hpd_filter + * + * @brief + * Programs HPD filter on associated HPD line to default values. + * + * @return + * true on success, false otherwise + */ +bool program_hpd_filter(const struct dc_link *link); +/* Query hot plug status of USB4 DP tunnel. + * Returns true if HPD high. + */ +bool dpia_query_hpd_status(struct dc_link *link); +bool query_hpd_status(struct dc_link *link, uint32_t *is_hpd_high); +bool link_get_hpd_state(struct dc_link *link); +struct gpio *link_get_hpd_gpio(struct dc_bios *dcb, + struct graphics_object_id link_id, + struct gpio_service *gpio_service); +void link_enable_hpd(const struct dc_link *link); +void link_disable_hpd(const struct dc_link *link); +void link_enable_hpd_filter(struct dc_link *link, bool enable); +#endif /* __DC_LINK_HPD_H__ */
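
As a worked illustration of the line-time calculation performed in edp_setup_replay() above, the following standalone sketch reproduces the same integer arithmetic. The 1080p timing values used here (h_total = 2200, pix_clk_100hz = 1485000, i.e. a 148.5 MHz pixel clock) are assumed sample numbers for illustration, not values taken from the driver:

#include <stdio.h>

/* Same expression as in edp_setup_replay(): pix_clk_100hz / 10 is the pixel
 * clock in kHz, so (h_total * 1000000) / kHz yields the line period in
 * nanoseconds; the trailing +1 biases the result upward by at most 1 ns so
 * the firmware never works with an underestimated line time.
 */
static unsigned int line_time_in_ns(unsigned int h_total, unsigned int pix_clk_100hz)
{
	return ((h_total * 1000000u) / (pix_clk_100hz / 10u)) + 1u;
}

int main(void)
{
	/* 2200 px / 148.5 MHz = 14814.8 ns, reported as 14815 ns */
	printf("%u ns\n", line_time_in_ns(2200, 1485000));
	return 0;
}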
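
Both edp_set_psr_allow_active() and edp_set_replay_allow_active() take allow_active and power_opts as pointers: a NULL pointer means "leave this setting untouched", and a non-NULL value is only forwarded to the firmware when it differs from the state cached in link->psr_settings or link->replay_settings. The user-space model below is a minimal sketch of that calling convention only; the struct and function names are invented for illustration and do not exist in the driver:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative cache of the two Replay knobs tracked per link */
struct replay_cache {
	bool allow_active;
	unsigned int power_opt;
};

/* Mirrors the optional-pointer pattern of edp_set_replay_allow_active():
 * each knob is applied only if a pointer was supplied and the value changed.
 */
static void set_replay_knobs(struct replay_cache *c,
			     const bool *allow_active,
			     const unsigned int *power_opt)
{
	if (power_opt && c->power_opt != *power_opt) {
		printf("program power_opt = %u\n", *power_opt);
		c->power_opt = *power_opt;
	}
	if (allow_active && c->allow_active != *allow_active) {
		printf("program allow_active = %d\n", (int)*allow_active);
		c->allow_active = *allow_active;
	}
}

int main(void)
{
	struct replay_cache cache = { false, 0 };
	bool active = true;
	unsigned int opt = 1;

	set_replay_knobs(&cache, &active, NULL); /* toggle activation only */
	set_replay_knobs(&cache, NULL, &opt);    /* change power option only */
	set_replay_knobs(&cache, &active, &opt); /* prints nothing: no change */
	return 0;
}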