Diffstat (limited to 'drivers/gpu/drm/amd/display/dc')
-rw-r--r-- drivers/gpu/drm/amd/display/dc/Makefile | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/conversion.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 64
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c | 108
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 81
-rw-r--r-- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c | 46
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc.c | 461
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c | 185
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 476
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_state.c | 880
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 147
-rw-r--r-- drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc.h | 71
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 122
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_plane.h | 38
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_plane_priv.h | 34
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_state.h | 78
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_state_priv.h | 102
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream.h | 80
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_stream_priv.h | 37
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dc_types.h | 88
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dce_abm.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 96
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce100/Makefile | 46
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce110/Makefile | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce112/Makefile | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce120/Makefile | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce60/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dce80/Makefile | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/Makefile | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/Makefile | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 38
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn201/Makefile | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn21/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/Makefile | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c | 23
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn301/Makefile | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn302/Makefile | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn303/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn31/Makefile | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn314/Makefile | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn315/Makefile | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn316/Makefile | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/Makefile | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c | 85
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c | 148
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn321/Makefile | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn35/Makefile | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c | 92
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h | 58
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_helpers.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dm_pp_smu.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 158
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c | 29
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c | 89
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h | 35
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/Makefile | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/dsc/dsc.h (renamed from drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/Makefile | 28
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 39
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c | 82
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 15
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c | 176
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c) | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 272
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h | 12
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c) | 5
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile | 17
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c | 171
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h | 33
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h | 8
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 31
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/abm.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 19
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/link.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/inc/resource.h | 16
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 105
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_factory.c | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/link_validation.h | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c | 36
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c | 10
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c | 62
-rw-r--r-- drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h | 7
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/Makefile | 108
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h) | 3
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h) | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c) | 11
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h) | 1
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/Makefile | 199
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c) | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c) | 30
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c) | 24
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c) | 14
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c) | 9
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c) | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c) | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c) | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c) | 4
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c) | 2
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c) | 6
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c) | 138
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h) | 26
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c) | 27
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h) | 0
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c) | 48
-rw-r--r-- drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h (renamed from drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h) | 1
224 files changed, 4402 insertions, 2013 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 3a169b78e..7991ae468 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -22,7 +22,7 @@
#
# Makefile for Display Core (dc) component.
-DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc
+DC_LIBS = basics bios dml clk_mgr dce gpio hwss irq link virtual dsc resource optc
ifdef CONFIG_DRM_AMD_DC_FP
@@ -34,12 +34,8 @@ DC_LIBS += dcn21
DC_LIBS += dcn201
DC_LIBS += dcn30
DC_LIBS += dcn301
-DC_LIBS += dcn302
-DC_LIBS += dcn303
DC_LIBS += dcn31
DC_LIBS += dcn314
-DC_LIBS += dcn315
-DC_LIBS += dcn316
DC_LIBS += dcn32
DC_LIBS += dcn321
DC_LIBS += dcn35
@@ -51,7 +47,6 @@ DC_LIBS += dce120
DC_LIBS += dce112
DC_LIBS += dce110
-DC_LIBS += dce100
DC_LIBS += dce80
ifdef CONFIG_DRM_AMD_DC_SI
@@ -65,7 +60,7 @@ AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LI
include $(AMD_DC)
DISPLAY_CORE = dc.o dc_stat.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
-dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o
+dc_surface.o dc_debug.o dc_stream.o dc_link_enc_cfg.o dc_link_exports.o dc_state.o
DISPLAY_CORE += dc_vm_helper.o
diff --git a/drivers/gpu/drm/amd/display/dc/basics/conversion.c b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
index e295a839a..1090d2350 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/conversion.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/conversion.c
@@ -103,7 +103,8 @@ void convert_float_matrix(
static uint32_t find_gcd(uint32_t a, uint32_t b)
{
- uint32_t remainder = 0;
+ uint32_t remainder;
+
while (b != 0) {
remainder = a % b;
a = b;
diff --git a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
index f2dfa96f9..39530b2ea 100644
--- a/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/basics/dce_calcs.c
@@ -94,7 +94,7 @@ static void calculate_bandwidth(
const uint32_t s_high = 7;
const uint32_t dmif_chunk_buff_margin = 1;
- uint32_t max_chunks_fbc_mode;
+ uint32_t max_chunks_fbc_mode = 0;
int32_t num_cursor_lines;
int32_t i, j, k;
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index bc7a375f4..05f392501 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -2223,22 +2223,22 @@ static enum bp_result bios_parser_get_disp_connector_caps_info(
switch (bp->object_info_tbl.revision.minor) {
case 4:
- default:
- object = get_bios_object(bp, object_id);
-
- if (!object)
- return BP_RESULT_BADINPUT;
-
- record = get_disp_connector_caps_record(bp, object);
- if (!record)
- return BP_RESULT_NORECORD;
-
- info->INTERNAL_DISPLAY =
- (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0;
- info->INTERNAL_DISPLAY_BL =
- (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0;
- break;
- case 5:
+ default:
+ object = get_bios_object(bp, object_id);
+
+ if (!object)
+ return BP_RESULT_BADINPUT;
+
+ record = get_disp_connector_caps_record(bp, object);
+ if (!record)
+ return BP_RESULT_NORECORD;
+
+ info->INTERNAL_DISPLAY =
+ (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY) ? 1 : 0;
+ info->INTERNAL_DISPLAY_BL =
+ (record->connectcaps & ATOM_CONNECTOR_CAP_INTERNAL_DISPLAY_BL) ? 1 : 0;
+ break;
+ case 5:
object_path_v3 = get_bios_object_from_path_v3(bp, object_id);
if (!object_path_v3)
@@ -2400,7 +2400,6 @@ static enum bp_result get_vram_info_v30(
return result;
}
-
/*
* get_integrated_info_v11
*
@@ -3336,27 +3335,28 @@ static enum bp_result get_bracket_layout_record(
DC_LOG_DETECTION_EDID_PARSER("Invalid slot_layout_info\n");
return BP_RESULT_BADINPUT;
}
+
tbl = &bp->object_info_tbl;
v1_4 = tbl->v1_4;
v1_5 = tbl->v1_5;
result = BP_RESULT_NORECORD;
switch (bp->object_info_tbl.revision.minor) {
- case 4:
- default:
- for (i = 0; i < v1_4->number_of_path; ++i) {
- if (bracket_layout_id ==
- v1_4->display_path[i].display_objid) {
- result = update_slot_layout_info(dcb, i, slot_layout_info);
- break;
- }
+ case 4:
+ default:
+ for (i = 0; i < v1_4->number_of_path; ++i) {
+ if (bracket_layout_id == v1_4->display_path[i].display_objid) {
+ result = update_slot_layout_info(dcb, i, slot_layout_info);
+ break;
}
- break;
- case 5:
- for (i = 0; i < v1_5->number_of_path; ++i)
- result = update_slot_layout_info_v2(dcb, i, slot_layout_info);
- break;
+ }
+ break;
+ case 5:
+ for (i = 0; i < v1_5->number_of_path; ++i)
+ result = update_slot_layout_info_v2(dcb, i, slot_layout_info);
+ break;
}
+
return result;
}
@@ -3365,9 +3365,7 @@ static enum bp_result bios_get_board_layout_info(
struct board_layout_info *board_layout_info)
{
unsigned int i;
-
struct bios_parser *bp;
-
static enum bp_result record_result;
unsigned int max_slots;
@@ -3377,7 +3375,6 @@ static enum bp_result bios_get_board_layout_info(
0, 0
};
-
bp = BP_FROM_DCB(dcb);
if (board_layout_info == NULL) {
@@ -3558,7 +3555,6 @@ static const struct dc_vbios_funcs vbios_funcs = {
.bios_parser_destroy = firmware_parser_destroy,
.get_board_layout_info = bios_get_board_layout_info,
- /* TODO: use this fn in hw init?*/
.pack_data_tables = bios_parser_pack_data_tables,
.get_atom_dc_golden_table = bios_get_atom_dc_golden_table,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 3e73c4e59..28a2a837d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -29,6 +29,7 @@
#include "dc_types.h"
#include "dccg.h"
#include "clk_mgr_internal.h"
+#include "dc_state_priv.h"
#include "link.h"
#include "dce100/dce_clk_mgr.h"
@@ -63,7 +64,7 @@ int clk_mgr_helper_get_active_display_cnt(
/* Don't count SubVP phantom pipes as part of active
* display count
*/
- if (stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM)
continue;
/*
@@ -368,7 +369,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
}
break;
-#endif /* CONFIG_DRM_AMD_DC_FP - Family RV */
+#endif /* CONFIG_DRM_AMD_DC_FP */
default:
ASSERT(0); /* Unknown Asic */
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index a5489fe68..aa9fd1dc5 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -546,6 +546,8 @@ static unsigned int find_dcfclk_for_voltage(const struct vg_dpm_clocks *clock_ta
int i;
for (i = 0; i < VG_NUM_SOC_VOLTAGE_LEVELS; i++) {
+ if (i >= VG_NUM_DCFCLK_DPM_LEVELS)
+ break;
if (clock_table->SocVoltage[i] == voltage)
return clock_table->DcfClocks[i];
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 12f3e8aa4..6ad4f4efe 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
return display_count;
}
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+ bool safe_to_lower, bool disable)
{
struct dc *dc = clk_mgr_base->ctx->dc;
int i;
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
- struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe = safe_to_lower
+ ? &context->res_ctx.pipe_ctx[i]
+ : &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
- dc_is_virtual_signal(pipe->stream->signal))) {
+ if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+ !pipe->stream->link_enc)) {
if (disable) {
- pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+ if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+ pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
reset_sync_context_for_pipe(dc, context, i);
} else
pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
- dcn316_disable_otg_wa(clk_mgr_base, context, true);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
- dcn316_disable_otg_wa(clk_mgr_base, context, false);
+ dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
update_dispclk = true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 289918ea7..bbdbc7816 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -25,7 +25,6 @@
#include "dccg.h"
#include "clk_mgr_internal.h"
-
#include "dcn32/dcn32_clk_mgr_smu_msg.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dce100/dce_clk_mgr.h"
@@ -34,7 +33,7 @@
#include "core_types.h"
#include "dm_helpers.h"
#include "link.h"
-
+#include "dc_state_priv.h"
#include "atomfirmware.h"
#include "smu13_driver_if.h"
@@ -472,20 +471,56 @@ static int dcn32_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base)
return 0;
}
-static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr_internal *clk_mgr)
+static bool dcn32_check_native_scaling(struct pipe_ctx *pipe)
{
- unsigned int dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
- unsigned int dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
- unsigned int dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK
- unsigned int dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK
- unsigned int dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK
- unsigned int fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK
+ bool is_native_scaling = false;
+ int width = pipe->plane_state->src_rect.width;
+ int height = pipe->plane_state->src_rect.height;
+
+ if (pipe->stream->timing.h_addressable == width &&
+ pipe->stream->timing.v_addressable == height &&
+ pipe->plane_state->dst_rect.width == width &&
+ pipe->plane_state->dst_rect.height == height)
+ is_native_scaling = true;
+
+ return is_native_scaling;
+}
+
+static void dcn32_auto_dpm_test_log(
+ struct dc_clocks *new_clocks,
+ struct clk_mgr_internal *clk_mgr,
+ struct dc_state *context)
+{
+ unsigned int dispclk_khz_reg, dppclk_khz_reg, dprefclk_khz_reg, dcfclk_khz_reg, dtbclk_khz_reg,
+ fclk_khz_reg, mall_ss_size_bytes;
+ int dramclk_khz_override, fclk_khz_override, num_fclk_levels;
+
+ struct pipe_ctx *pipe_ctx_list[MAX_PIPES];
+ int active_pipe_count = 0;
+
+ for (int i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
+ pipe_ctx_list[active_pipe_count] = pipe_ctx;
+ active_pipe_count++;
+ }
+ }
+
+ mall_ss_size_bytes = context->bw_ctx.bw.dcn.mall_ss_size_bytes;
+
+ dispclk_khz_reg = REG_READ(CLK1_CLK0_CURRENT_CNT); // DISPCLK
+ dppclk_khz_reg = REG_READ(CLK1_CLK1_CURRENT_CNT); // DPPCLK
+ dprefclk_khz_reg = REG_READ(CLK1_CLK2_CURRENT_CNT); // DPREFCLK
+ dcfclk_khz_reg = REG_READ(CLK1_CLK3_CURRENT_CNT); // DCFCLK
+ dtbclk_khz_reg = REG_READ(CLK1_CLK4_CURRENT_CNT); // DTBCLK
+ fclk_khz_reg = REG_READ(CLK4_CLK0_CURRENT_CNT); // FCLK
// Overrides for these clocks in case there is no p_state change support
- int dramclk_khz_override = new_clocks->dramclk_khz;
- int fclk_khz_override = new_clocks->fclk_khz;
+ dramclk_khz_override = new_clocks->dramclk_khz;
+ fclk_khz_override = new_clocks->fclk_khz;
- int num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;
+ num_fclk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_fclk_levels - 1;
if (!new_clocks->p_state_change_support) {
dramclk_khz_override = clk_mgr->base.bw_params->max_memclk_mhz * 1000;
@@ -502,16 +537,49 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr
//
// AutoDPMTest: clk1:%d - clk2:%d - clk3:%d - clk4:%d\n"
////////////////////////////////////////////////////////////////////////////
- if (new_clocks &&
+ if (new_clocks && active_pipe_count > 0 &&
new_clocks->dramclk_khz > 0 &&
new_clocks->fclk_khz > 0 &&
new_clocks->dcfclk_khz > 0 &&
new_clocks->dppclk_khz > 0) {
+ uint32_t pix_clk_list[MAX_PIPES] = {0};
+ int p_state_list[MAX_PIPES] = {0};
+ int disp_src_width_list[MAX_PIPES] = {0};
+ int disp_src_height_list[MAX_PIPES] = {0};
+ uint64_t disp_src_refresh_list[MAX_PIPES] = {0};
+ bool is_scaled_list[MAX_PIPES] = {0};
+
+ for (int i = 0; i < active_pipe_count; i++) {
+ struct pipe_ctx *curr_pipe_ctx = pipe_ctx_list[i];
+ uint64_t refresh_rate;
+
+ pix_clk_list[i] = curr_pipe_ctx->stream->timing.pix_clk_100hz;
+ p_state_list[i] = curr_pipe_ctx->p_state_type;
+
+ refresh_rate = (curr_pipe_ctx->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ curr_pipe_ctx->stream->timing.v_total * curr_pipe_ctx->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, curr_pipe_ctx->stream->timing.h_total);
+ disp_src_refresh_list[i] = refresh_rate;
+
+ if (curr_pipe_ctx->plane_state) {
+ is_scaled_list[i] = !(dcn32_check_native_scaling(curr_pipe_ctx));
+ disp_src_width_list[i] = curr_pipe_ctx->plane_state->src_rect.width;
+ disp_src_height_list[i] = curr_pipe_ctx->plane_state->src_rect.height;
+ }
+ }
+
DC_LOG_AUTO_DPM_TEST("AutoDPMTest: dramclk:%d - fclk:%d - "
"dcfclk:%d - dppclk:%d - dispclk_hw:%d - "
"dppclk_hw:%d - dprefclk_hw:%d - dcfclk_hw:%d - "
- "dtbclk_hw:%d - fclk_hw:%d\n",
+ "dtbclk_hw:%d - fclk_hw:%d - pix_clk_0:%d - pix_clk_1:%d - "
+ "pix_clk_2:%d - pix_clk_3:%d - mall_ss_size:%d - p_state_type_0:%d - "
+ "p_state_type_1:%d - p_state_type_2:%d - p_state_type_3:%d - "
+ "pix_width_0:%d - pix_height_0:%d - refresh_rate_0:%lld - is_scaled_0:%d - "
+ "pix_width_1:%d - pix_height_1:%d - refresh_rate_1:%lld - is_scaled_1:%d - "
+ "pix_width_2:%d - pix_height_2:%d - refresh_rate_2:%lld - is_scaled_2:%d - "
+ "pix_width_3:%d - pix_height_3:%d - refresh_rate_3:%lld - is_scaled_3:%d - LOG_END\n",
dramclk_khz_override,
fclk_khz_override,
new_clocks->dcfclk_khz,
@@ -521,7 +589,14 @@ static void dcn32_auto_dpm_test_log(struct dc_clocks *new_clocks, struct clk_mgr
dprefclk_khz_reg,
dcfclk_khz_reg,
dtbclk_khz_reg,
- fclk_khz_reg);
+ fclk_khz_reg,
+ pix_clk_list[0], pix_clk_list[1], pix_clk_list[3], pix_clk_list[2],
+ mall_ss_size_bytes,
+ p_state_list[0], p_state_list[1], p_state_list[2], p_state_list[3],
+ disp_src_width_list[0], disp_src_height_list[0], disp_src_refresh_list[0], is_scaled_list[0],
+ disp_src_width_list[1], disp_src_height_list[1], disp_src_refresh_list[1], is_scaled_list[1],
+ disp_src_width_list[2], disp_src_height_list[2], disp_src_refresh_list[2], is_scaled_list[2],
+ disp_src_width_list[3], disp_src_height_list[3], disp_src_refresh_list[3], is_scaled_list[3]);
}
}
@@ -694,6 +769,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
/* DCCG requires KHz precision for DTBCLK */
clk_mgr_base->clks.ref_dtbclk_khz =
dcn32_smu_set_hard_min_by_freq(clk_mgr, PPCLK_DTBCLK, khz_to_mhz_ceil(new_clocks->ref_dtbclk_khz));
+
dcn32_update_clocks_update_dtb_dto(clk_mgr, context, clk_mgr_base->clks.ref_dtbclk_khz);
}
@@ -722,7 +798,7 @@ static void dcn32_update_clocks(struct clk_mgr *clk_mgr_base,
clk_mgr_base->clks.dispclk_khz / 1000 / 7);
if (dc->config.enable_auto_dpm_test_logs) {
- dcn32_auto_dpm_test_log(new_clocks, clk_mgr);
+ dcn32_auto_dpm_test_log(new_clocks, clk_mgr, context);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 353d5fb9e..9cbab880c 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -50,6 +50,7 @@
#include "dc_dmub_srv.h"
#include "link.h"
#include "logger_types.h"
+
#undef DC_LOGGER
#define DC_LOGGER \
clk_mgr->base.base.ctx->logger
@@ -80,12 +81,12 @@
static int dcn35_get_active_display_cnt_wa(
struct dc *dc,
- struct dc_state *context)
+ struct dc_state *context,
+ int *all_active_disps)
{
- int i, display_count;
+ int i, display_count = 0;
bool tmds_present = false;
- display_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
@@ -103,7 +104,8 @@ static int dcn35_get_active_display_cnt_wa(
link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
-
+ if (all_active_disps != NULL)
+ *all_active_disps = display_count;
/* WA for hang on HDMI after display off back on*/
if (display_count == 0 && tmds_present)
display_count = 1;
@@ -216,15 +218,16 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count;
+ int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
+ int all_active_disps = 0;
if (dc->work_arounds.skip_clock_update)
return;
- /* DTBCLK is fixed, so set a default if unspecified. */
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, &all_active_disps);
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
@@ -246,7 +249,6 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
}
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn35_get_active_display_cnt_wa(dc, context);
/* if we can go lower, go lower */
if (display_count == 0)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@@ -382,19 +384,6 @@ static void dcn35_enable_pme_wa(struct clk_mgr *clk_mgr_base)
dcn35_smu_enable_pme_wa(clk_mgr);
}
-void dcn35_init_clocks(struct clk_mgr *clk_mgr)
-{
- uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
-
- memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
-
- // Assumption is that boot state always supports pstate
- clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
- clk_mgr->clks.p_state_change_support = true;
- clk_mgr->clks.prev_p_state_change_support = true;
- clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
- clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
-}
bool dcn35_are_clock_states_equal(struct dc_clocks *a,
struct dc_clocks *b)
@@ -416,11 +405,22 @@ bool dcn35_are_clock_states_equal(struct dc_clocks *a,
}
static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
- struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info)
+ struct clk_mgr_dcn35 *clk_mgr)
{
-
}
+void dcn35_init_clocks(struct clk_mgr *clk_mgr)
+{
+ uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
+
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ // Assumption is that boot state always supports pstate
+ clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
+ clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
+}
static struct clk_bw_params dcn35_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
@@ -436,32 +436,32 @@ static struct wm_table ddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.72,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
@@ -473,32 +473,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
.valid = true,
},
}
@@ -825,7 +825,7 @@ static void dcn35_set_low_power_state(struct clk_mgr *clk_mgr_base)
struct dc_state *context = dc->current_state;
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn35_get_active_display_cnt_wa(dc, context);
+ display_count = dcn35_get_active_display_cnt_wa(dc, context, NULL);
/* if we can go lower, go lower */
if (display_count == 0)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
@@ -992,7 +992,6 @@ void dcn35_clk_mgr_construct(
struct dccg *dccg)
{
struct dcn35_smu_dpm_clks smu_dpm_clks = { 0 };
- struct clk_log_info log_info = {0};
clk_mgr->base.base.ctx = ctx;
clk_mgr->base.base.funcs = &dcn35_funcs;
@@ -1045,7 +1044,7 @@ void dcn35_clk_mgr_construct(
dcn35_bw_params.wm_table = ddr5_wm_table;
}
/* Saved clocks configured at boot for debug purposes */
- dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, &clk_mgr->base.base, &log_info);
+ dcn35_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
clk_mgr->base.base.dprefclk_khz = dcn35_smu_get_dprefclk(&clk_mgr->base);
clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
index b6b8c3ca1..6d4a1ffab 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_smu.c
@@ -116,6 +116,9 @@ static uint32_t dcn35_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, un
msleep(delay_us/1000);
else if (delay_us > 0)
udelay(delay_us);
+
+ if (clk_mgr->base.ctx->dc->debug.disable_timeout)
+ max_retries++;
} while (max_retries--);
return res_val;
@@ -276,7 +279,7 @@ void dcn35_smu_set_display_idle_optimization(struct clk_mgr_internal *clk_mgr, u
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info);
- smu_print("VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %d\n", idle_info);
+ smu_print("%s: VBIOSSMC_MSG_SetDisplayIdleOptimizations idle_info = %x\n", __func__, idle_info);
}
void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
@@ -295,7 +298,7 @@ void dcn35_smu_enable_phy_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool e
clk_mgr,
VBIOSSMC_MSG_SetDisplayIdleOptimizations,
idle_info.data);
- smu_print("dcn35_smu_enable_phy_refclk_pwrdwn = %d\n", enable ? 1 : 0);
+ smu_print("%s smu_enable_phy_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}
void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
@@ -307,6 +310,7 @@ void dcn35_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr)
clk_mgr,
VBIOSSMC_MSG_UpdatePmeRestore,
0);
+ smu_print("%s: SMC_MSG_UpdatePmeRestore\n", __func__);
}
void dcn35_smu_set_dram_addr_high(struct clk_mgr_internal *clk_mgr, uint32_t addr_high)
@@ -347,7 +351,7 @@ void dcn35_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr)
void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support)
{
- unsigned int msg_id, param;
+ unsigned int msg_id, param, retv;
if (!clk_mgr->smu_present)
return;
@@ -357,27 +361,32 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
case DCN_ZSTATE_SUPPORT_ALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 9) | (1 << 8);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_DISALLOW:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = 0;
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg_id = DISALLOW, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z10_ONLY, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_Z10_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 10) | (1 << 8);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_Z10_ONLY, param = %d\n", __func__, param);
break;
case DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY:
msg_id = VBIOSSMC_MSG_AllowZstatesEntry;
param = (1 << 8);
+ smu_print("%s: SMC_MSG_AllowZstatesEntry msg = ALLOW_Z8_ONLY, param = %d\n", __func__, param);
break;
default: //DCN_ZSTATE_SUPPORT_UNKNOWN
@@ -387,11 +396,11 @@ void dcn35_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zst
}
- dcn35_smu_send_msg_with_param(
+ retv = dcn35_smu_send_msg_with_param(
clk_mgr,
msg_id,
param);
- smu_print("dcn35_smu_set_zstate_support msg_id = %d, param = %d\n", msg_id, param);
+ smu_print("%s: msg_id = %d, param = 0x%x, return = %d\n", __func__, msg_id, param, retv);
}
int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
@@ -405,7 +414,7 @@ int dcn35_smu_get_dprefclk(struct clk_mgr_internal *clk_mgr)
VBIOSSMC_MSG_GetDprefclkFreq,
0);
- smu_print("dcn35_smu_get_DPREF clk = %d mhz\n", dprefclk);
+ smu_print("%s: SMU DPREF clk = %d mhz\n", __func__, dprefclk);
return dprefclk * 1000;
}
@@ -420,7 +429,7 @@ int dcn35_smu_get_dtbclk(struct clk_mgr_internal *clk_mgr)
VBIOSSMC_MSG_GetDtbclkFreq,
0);
- smu_print("dcn35_smu_get_dtbclk = %d mhz\n", dtbclk);
+ smu_print("%s: get_dtbclk = %dmhz\n", __func__, dtbclk);
return dtbclk * 1000;
}
/* Arg = 1: Turn DTB on; 0: Turn DTB CLK OFF. when it is on, it is 600MHZ */
@@ -433,7 +442,7 @@ void dcn35_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable)
clk_mgr,
VBIOSSMC_MSG_SetDtbClk,
enable);
- smu_print("dcn35_smu_set_dtbclk = %d \n", enable ? 1 : 0);
+ smu_print("%s: smu_set_dtbclk = %d\n", __func__, enable ? 1 : 0);
}
void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable)
@@ -442,30 +451,45 @@ void dcn35_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *cl
clk_mgr,
VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown,
enable);
+ smu_print("%s: smu_enable_48mhz_tmdp_refclk_pwrdwn = %d\n", __func__, enable ? 1 : 0);
}
int dcn35_smu_exit_low_power_state(struct clk_mgr_internal *clk_mgr)
{
- return dcn35_smu_send_msg_with_param(
+ int retv;
+
+ retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_DispPsrExit,
0);
+ smu_print("%s: smu_exit_low_power_state return = %d\n", __func__, retv);
+ return retv;
}
int dcn35_smu_get_ips_supported(struct clk_mgr_internal *clk_mgr)
{
- return dcn35_smu_send_msg_with_param(
+ int retv;
+
+ retv = dcn35_smu_send_msg_with_param(
clk_mgr,
VBIOSSMC_MSG_QueryIPS2Support,
0);
+
+ //smu_print("%s: VBIOSSMC_MSG_QueryIPS2Support return = %x\n", __func__, retv);
+ return retv;
}
void dcn35_smu_write_ips_scratch(struct clk_mgr_internal *clk_mgr, uint32_t param)
{
REG_WRITE(MP1_SMN_C2PMSG_71, param);
+ //smu_print("%s: write_ips_scratch = %x\n", __func__, param);
}
uint32_t dcn35_smu_read_ips_scratch(struct clk_mgr_internal *clk_mgr)
{
- return REG_READ(MP1_SMN_C2PMSG_71);
+ uint32_t retv;
+
+ retv = REG_READ(MP1_SMN_C2PMSG_71);
+ //smu_print("%s: dcn35_smu_read_ips_scratch = %x\n", __func__, retv);
+ return retv;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index b51208f44..3c3d613c5 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -34,6 +34,8 @@
#include "dce/dce_hwseq.h"
#include "resource.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
#include "gpio_service_interface.h"
#include "clk_mgr.h"
@@ -409,9 +411,14 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc,
* avoid conflicting with firmware updates.
*/
if (dc->ctx->dce_version > DCE_VERSION_MAX)
- if (dc->optimized_required || dc->wm_optimized_required)
+ if (dc->optimized_required)
return false;
+ if (!memcmp(&stream->adjust, adjust, sizeof(*adjust)))
+ return true;
+
+ dc_exit_ips_for_hw_access(dc);
+
stream->adjust.v_total_max = adjust->v_total_max;
stream->adjust.v_total_mid = adjust->v_total_mid;
stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num;
@@ -452,6 +459,8 @@ bool dc_stream_get_last_used_drr_vtotal(struct dc *dc,
int i = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -482,6 +491,8 @@ bool dc_stream_get_crtc_position(struct dc *dc,
bool ret = false;
struct crtc_position position;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe =
&dc->current_state->res_ctx.pipe_ctx[i];
@@ -601,6 +612,8 @@ bool dc_stream_configure_crc(struct dc *dc, struct dc_stream_state *stream,
if (pipe == NULL)
return false;
+ dc_exit_ips_for_hw_access(dc);
+
/* By default, capture the full frame */
param.windowa_x_start = 0;
param.windowa_y_start = 0;
@@ -660,6 +673,8 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream,
struct pipe_ctx *pipe;
struct timing_generator *tg;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
if (pipe->stream == stream)
@@ -684,6 +699,8 @@ void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream,
int i;
struct pipe_ctx *pipe_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -719,6 +736,8 @@ void dc_stream_set_dither_option(struct dc_stream_state *stream,
if (option > DITHER_OPTION_MAX)
return;
+ dc_exit_ips_for_hw_access(stream->ctx->dc);
+
stream->dither_option = option;
memset(&params, 0, sizeof(params));
@@ -743,6 +762,8 @@ bool dc_stream_set_gamut_remap(struct dc *dc, const struct dc_stream_state *stre
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream == stream) {
pipes = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -760,6 +781,8 @@ bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream)
bool ret = false;
struct pipe_ctx *pipes;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (dc->current_state->res_ctx.pipe_ctx[i].stream
== stream) {
@@ -786,6 +809,8 @@ void dc_stream_set_static_screen_params(struct dc *dc,
struct pipe_ctx *pipes_affected[MAX_PIPES];
int num_pipes_affected = 0;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < num_streams; i++) {
struct dc_stream_state *stream = streams[i];
@@ -808,7 +833,7 @@ static void dc_destruct(struct dc *dc)
link_enc_cfg_init(dc, dc->current_state);
if (dc->current_state) {
- dc_release_state(dc->current_state);
+ dc_state_release(dc->current_state);
dc->current_state = NULL;
}
@@ -1020,29 +1045,27 @@ static bool dc_construct(struct dc *dc,
}
#endif
+ if (!create_links(dc, init_params->num_virtual_links))
+ goto fail;
+
+ /* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction.
+ */
+ if (!create_link_encoders(dc))
+ goto fail;
+
/* Creation of current_state must occur after dc->dml
* is initialized in dc_create_resource_pool because
* on creation it copies the contents of dc->dml
*/
- dc->current_state = dc_create_state(dc);
+ dc->current_state = dc_state_create(dc);
if (!dc->current_state) {
dm_error("%s: failed to create validate ctx\n", __func__);
goto fail;
}
- if (!create_links(dc, init_params->num_virtual_links))
- goto fail;
-
- /* Create additional DIG link encoder objects if fewer than the platform
- * supports were created during link construction.
- */
- if (!create_link_encoders(dc))
- goto fail;
-
- dc_resource_state_construct(dc, dc->current_state);
-
return true;
fail:
@@ -1085,7 +1108,7 @@ static void apply_ctx_interdependent_lock(struct dc *dc,
}
}
-static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
+static void dc_update_visual_confirm_color(struct dc *dc, struct dc_state *context, struct pipe_ctx *pipe_ctx)
{
if (dc->ctx->dce_version >= DCN_VERSION_1_0) {
memset(&pipe_ctx->visual_confirm_color, 0, sizeof(struct tg_color));
@@ -1105,9 +1128,9 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE)
get_mpctree_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SUBVP)
- get_subvp_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ get_subvp_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
- get_mclk_switch_visual_confirm_color(dc, context, pipe_ctx, &(pipe_ctx->visual_confirm_color));
+ get_mclk_switch_visual_confirm_color(pipe_ctx, &(pipe_ctx->visual_confirm_color));
}
}
}
@@ -1115,7 +1138,7 @@ static void dc_update_viusal_confirm_color(struct dc *dc, struct dc_state *conte
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
- struct dc_state *dangling_context = dc_create_state(dc);
+ struct dc_state *dangling_context = dc_state_create_current_copy(dc);
struct dc_state *current_ctx;
struct pipe_ctx *pipe;
struct timing_generator *tg;
@@ -1123,8 +1146,6 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
if (dangling_context == NULL)
return;
- dc_resource_state_copy_construct(dc->current_state, dangling_context);
-
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct dc_stream_state *old_stream =
dc->current_state->res_ctx.pipe_ctx[i].stream;
@@ -1161,6 +1182,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
}
if (should_disable && old_stream) {
+ bool is_phantom = dc_state_get_stream_subvp_type(dc->current_state, old_stream) == SUBVP_PHANTOM;
pipe = &dc->current_state->res_ctx.pipe_ctx[i];
tg = pipe->stream_res.tg;
/* When disabling plane for a phantom pipe, we must turn on the
@@ -1169,22 +1191,29 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* state that can result in underflow or hang when enabling it
* again for different use.
*/
- if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (is_phantom) {
if (tg->funcs->enable_crtc) {
int main_pipe_width, main_pipe_height;
+ struct dc_stream_state *old_paired_stream = dc_state_get_paired_subvp_stream(dc->current_state, old_stream);
- main_pipe_width = old_stream->mall_stream_config.paired_stream->dst.width;
- main_pipe_height = old_stream->mall_stream_config.paired_stream->dst.height;
+ main_pipe_width = old_paired_stream->dst.width;
+ main_pipe_height = old_paired_stream->dst.height;
if (dc->hwss.blank_phantom)
dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
tg->funcs->enable_crtc(tg);
}
}
- dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
+
+ if (is_phantom)
+ dc_state_rem_all_phantom_planes_for_stream(dc, old_stream, dangling_context, true);
+ else
+ dc_state_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
+ dc_update_visual_confirm_color(dc, context, pipe);
+ }
if (dc->hwss.apply_ctx_for_surface) {
apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
@@ -1203,7 +1232,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
* The OTG is set to disable on falling edge of VUPDATE so the plane disable
* will still get it's double buffer update.
*/
- if (old_stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (is_phantom) {
if (tg->funcs->disable_phantom_crtc)
tg->funcs->disable_phantom_crtc(tg);
}
@@ -1212,7 +1241,7 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
current_ctx = dc->current_state;
dc->current_state = dangling_context;
- dc_release_state(current_ctx);
+ dc_state_release(current_ctx);
}
static void disable_vbios_mode_if_required(
@@ -1276,6 +1305,54 @@ static void disable_vbios_mode_if_required(
}
}
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+ struct dc_state *context)
+{
+ struct pipe_ctx *opp_head;
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ if (!hws->funcs.wait_for_blank_complete)
+ return;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ opp_head = &context->res_ctx.pipe_ctx[i];
+
+ if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+ dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+ continue;
+
+ hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+ }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+ struct pipe_ctx *otg_master;
+ struct timing_generator *tg;
+ int i;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ otg_master = &context->res_ctx.pipe_ctx[i];
+ if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+ dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+ continue;
+ tg = otg_master->stream_res.tg;
+ if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+ tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+ }
+
+ /* ODM update may require reprogramming the blank pattern for each OPP */
+ wait_for_blank_complete(dc, context);
+}
+
static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
{
int i;
@@ -1284,7 +1361,7 @@ static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
int count = 0;
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->plane_state || pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (!pipe->plane_state || dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
continue;
/* Timeout 100 ms */
@@ -1510,7 +1587,7 @@ static void program_timing_sync(
}
for (k = 0; k < group_size; k++) {
- struct dc_stream_status *status = dc_stream_get_status_from_state(ctx, pipe_set[k]->stream);
+ struct dc_stream_status *status = dc_state_get_stream_status(ctx, pipe_set[k]->stream);
status->timing_sync_info.group_id = num_group;
status->timing_sync_info.group_size = group_size;
@@ -1521,7 +1598,7 @@ static void program_timing_sync(
}
- /* remove any other pipes that are already been synced */
+ /* remove any other unblanked pipes as they have already been synced */
if (dc->config.use_pipe_ctx_sync_logic) {
/* check pipe's syncd to decide which pipe to be removed */
for (j = 1; j < group_size; j++) {
@@ -1534,6 +1611,7 @@ static void program_timing_sync(
pipe_set[j]->pipe_idx_syncd = pipe_set[0]->pipe_idx_syncd;
}
} else {
+ /* remove any other pipes by checking for a valid plane */
for (j = j + 1; j < group_size; j++) {
bool is_blanked;
@@ -1554,7 +1632,7 @@ static void program_timing_sync(
if (group_size > 1) {
if (sync_type == TIMING_SYNCHRONIZABLE) {
dc->hwss.enable_timing_synchronization(
- dc, group_index, group_size, pipe_set);
+ dc, ctx, group_index, group_size, pipe_set);
} else
if (sync_type == VBLANK_SYNCHRONIZABLE) {
dc->hwss.enable_vblanks_synchronization(
@@ -1759,6 +1837,8 @@ void dc_enable_stereo(
int i, j;
struct pipe_ctx *pipe;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
if (context != NULL) {
pipe = &context->res_ctx.pipe_ctx[i];
@@ -1778,6 +1858,8 @@ void dc_enable_stereo(
void dc_trigger_sync(struct dc *dc, struct dc_state *context)
{
if (context->stream_count > 1 && !dc->debug.disable_timing_sync) {
+ dc_exit_ips_for_hw_access(dc);
+
enable_timing_multisync(dc, context);
program_timing_sync(dc, context);
}
@@ -1836,7 +1918,7 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
/* Check old context for SubVP */
- subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
if (subvp_prev_use)
break;
}
@@ -1962,6 +2044,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
context->stream_count == 0) {
/* Must wait for no flips to be pending before doing optimize bw */
wait_for_no_pipes_pending(dc, context);
+ /*
+ * The optimized dispclk depends on the ODM setup, so wait for any
+ * pending ODM update to complete before optimizing bandwidth.
+ */
+ wait_for_odm_update_pending_complete(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
/* Need to do otg sync again as otg could be out of sync due to otg
@@ -1994,9 +2081,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
old_state = dc->current_state;
dc->current_state = context;
- dc_release_state(old_state);
+ dc_state_release(old_state);
- dc_retain_state(dc->current_state);
+ dc_state_retain(dc->current_state);
return result;
}
@@ -2034,6 +2121,8 @@ enum dc_status dc_commit_streams(struct dc *dc,
if (!streams_changed(dc, streams, stream_count))
return res;
+ dc_exit_ips_for_hw_access(dc);
+
DC_LOG_DC("%s: %d streams\n", __func__, stream_count);
for (i = 0; i < stream_count; i++) {
@@ -2067,12 +2156,10 @@ enum dc_status dc_commit_streams(struct dc *dc,
if (handle_exit_odm2to1)
res = commit_minimal_transition_state(dc, dc->current_state);
- context = dc_create_state(dc);
+ context = dc_state_create_current_copy(dc);
if (!context)
goto context_alloc_fail;
- dc_resource_state_copy_construct_current(dc, context);
-
res = dc_validate_with_context(dc, set, stream_count, context, false);
if (res != DC_OK) {
BREAK_TO_DEBUGGER();
@@ -2087,7 +2174,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
streams[i]->out.otg_offset = context->stream_status[j].primary_otg_inst;
if (dc_is_embedded_signal(streams[i]->signal)) {
- struct dc_stream_status *status = dc_stream_get_status_from_state(context, streams[i]);
+ struct dc_stream_status *status = dc_state_get_stream_status(context, streams[i]);
if (dc->hwss.is_abm_supported)
status->is_abm_supported = dc->hwss.is_abm_supported(dc, context, streams[i]);
@@ -2098,7 +2185,7 @@ enum dc_status dc_commit_streams(struct dc *dc,
}
fail:
- dc_release_state(context);
+ dc_state_release(context);
context_alloc_fail:
@@ -2152,7 +2239,7 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
- if (!pipe->plane_state || (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM))
+ if (!pipe->plane_state || (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM))
continue;
/* Must set to false to start with, due to OR in update function */
@@ -2210,7 +2297,7 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
if (context->res_ctx.pipe_ctx[i].stream == NULL ||
context->res_ctx.pipe_ctx[i].plane_state == NULL) {
context->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, context, &context->res_ctx.pipe_ctx[i]);
}
process_deferred_updates(dc);
@@ -2222,104 +2309,6 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
}
dc->optimized_required = false;
- dc->wm_optimized_required = false;
-}
-
-static void init_state(struct dc *dc, struct dc_state *context)
-{
- /* Each context must have their own instance of VBA and in order to
- * initialize and obtain IP and SOC the base DML instance from DC is
- * initially copied into every context
- */
- memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
-}
-
-struct dc_state *dc_create_state(struct dc *dc)
-{
- struct dc_state *context = kvzalloc(sizeof(struct dc_state),
- GFP_KERNEL);
-
- if (!context)
- return NULL;
-
- init_state(dc, context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- if (dc->debug.using_dml2) {
- dml2_create(dc, &dc->dml2_options, &context->bw_ctx.dml2);
- }
-#endif
- kref_init(&context->refcount);
-
- return context;
-}
-
-struct dc_state *dc_copy_state(struct dc_state *src_ctx)
-{
- int i, j;
- struct dc_state *new_ctx = kvmalloc(sizeof(struct dc_state), GFP_KERNEL);
-
- if (!new_ctx)
- return NULL;
- memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- if (new_ctx->bw_ctx.dml2 && !dml2_create_copy(&new_ctx->bw_ctx.dml2, src_ctx->bw_ctx.dml2)) {
- dc_release_state(new_ctx);
- return NULL;
- }
-#endif
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
-
- if (cur_pipe->top_pipe)
- cur_pipe->top_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
- if (cur_pipe->bottom_pipe)
- cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
- if (cur_pipe->prev_odm_pipe)
- cur_pipe->prev_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
-
- if (cur_pipe->next_odm_pipe)
- cur_pipe->next_odm_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
- }
-
- for (i = 0; i < new_ctx->stream_count; i++) {
- dc_stream_retain(new_ctx->streams[i]);
- for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
- dc_plane_state_retain(
- new_ctx->stream_status[i].plane_states[j]);
- }
-
- kref_init(&new_ctx->refcount);
-
- return new_ctx;
-}
-
-void dc_retain_state(struct dc_state *context)
-{
- kref_get(&context->refcount);
-}
-
-static void dc_state_free(struct kref *kref)
-{
- struct dc_state *context = container_of(kref, struct dc_state, refcount);
- dc_resource_state_destruct(context);
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- dml2_destroy(context->bw_ctx.dml2);
- context->bw_ctx.dml2 = 0;
-#endif
-
- kvfree(context);
-}
-
-void dc_release_state(struct dc_state *context)
-{
- kref_put(&context->refcount, dc_state_free);
}
bool dc_set_generic_gpio_for_stereo(bool enable,
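The helpers deleted above (dc_create_state, dc_copy_state, dc_retain_state, dc_release_state) are superseded by the refcounted dc_state_* interface added in core/dc_state.c. A minimal sketch of the temporary-context usage that the updated call sites in this patch follow, e.g. for validation (illustrative only, not part of the patch):

	struct dc_state *new_ctx = dc_state_create_copy(dc->current_state);

	if (new_ctx) {
		/* ... modify and validate new_ctx ... */
		dc_state_release(new_ctx);	/* drop the reference when done */
	}

dc_state_retain()/dc_state_release() pair up for any additional references, for instance when a validated context is installed as dc->current_state.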
@@ -2742,8 +2731,6 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
}
-
- dc->optimized_required |= dc->wm_optimized_required;
}
return type;
@@ -2951,9 +2938,6 @@ static void copy_stream_update_to_stream(struct dc *dc,
if (update->vrr_active_fixed)
stream->vrr_active_fixed = *update->vrr_active_fixed;
- if (update->crtc_timing_adjust)
- stream->adjust = *update->crtc_timing_adjust;
-
if (update->dpms_off)
stream->dpms_off = *update->dpms_off;
@@ -2994,11 +2978,9 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config->num_slices_v != 0);
 /* Use temporary context for validating new DSC config */
- struct dc_state *dsc_validate_context = dc_create_state(dc);
+ struct dc_state *dsc_validate_context = dc_state_create_copy(dc->current_state);
if (dsc_validate_context) {
- dc_resource_state_copy_construct(dc->current_state, dsc_validate_context);
-
stream->timing.dsc_cfg = *update->dsc_config;
stream->timing.flags.DSC = enable_dsc;
if (!dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true)) {
@@ -3007,7 +2989,7 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config = NULL;
}
- dc_release_state(dsc_validate_context);
+ dc_state_release(dsc_validate_context);
} else {
DC_ERROR("Failed to allocate new validate context for DSC change\n");
update->dsc_config = NULL;
@@ -3106,30 +3088,27 @@ static bool update_planes_and_stream_state(struct dc *dc,
new_planes[i] = srf_updates[i].surface;
/* initialize scratch memory for building context */
- context = dc_create_state(dc);
+ context = dc_state_create_copy(dc->current_state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return false;
}
- dc_resource_state_copy_construct(
- dc->current_state, context);
-
/* For each full update, remove all existing phantom pipes first.
* Ensures that we have enough pipes for newly added MPO planes
*/
- if (dc->res_pool->funcs->remove_phantom_pipes)
- dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
/*remove old surfaces from context */
- if (!dc_rem_all_planes_for_stream(dc, stream, context)) {
+ if (!dc_state_rem_all_planes_for_stream(dc, stream, context)) {
BREAK_TO_DEBUGGER();
goto fail;
}
/* add surface to context */
- if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
+ if (!dc_state_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) {
BREAK_TO_DEBUGGER();
goto fail;
@@ -3154,19 +3133,6 @@ static bool update_planes_and_stream_state(struct dc *dc,
if (update_type == UPDATE_TYPE_FULL) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
- /* For phantom pipes we remove and create a new set of phantom pipes
- * for each full update (because we don't know if we'll need phantom
- * pipes until after the first round of validation). However, if validation
- * fails we need to keep the existing phantom pipes (because we don't update
- * the dc->current_state).
- *
- * The phantom stream/plane refcount is decremented for validation because
- * we assume it'll be removed (the free comes when the dc_state is freed),
- * but if validation fails we have to increment back the refcount so it's
- * consistent.
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, dc->current_state);
BREAK_TO_DEBUGGER();
goto fail;
}
@@ -3187,7 +3153,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
return true;
fail:
- dc_release_state(context);
+ dc_state_release(context);
return false;
@@ -3488,18 +3454,26 @@ static void commit_planes_for_stream_fast(struct dc *dc,
{
int i, j;
struct pipe_ctx *top_pipe_to_program = NULL;
+ struct dc_stream_status *stream_status = NULL;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
top_pipe_to_program = resource_get_otg_master_for_stream(
&context->res_ctx,
stream);
- if (dc->debug.visual_confirm) {
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ if (!top_pipe_to_program)
+ return;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
}
}
@@ -3523,6 +3497,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
}
}
+ stream_status = dc_state_get_stream_status(context, stream);
+
build_dmub_cmd_list(dc,
srf_updates,
surface_count,
@@ -3535,7 +3511,8 @@ static void commit_planes_for_stream_fast(struct dc *dc,
context->dmub_cmd_count,
context->block_sequence,
&(context->block_sequence_steps),
- top_pipe_to_program);
+ top_pipe_to_program,
+ stream_status);
hwss_execute_sequence(dc,
context->block_sequence,
context->block_sequence_steps);
@@ -3548,7 +3525,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
top_pipe_to_program->stream->update_flags.raw = 0;
}
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
{
/*
* This function calls HWSS to wait for any potentially double buffered
@@ -3586,6 +3563,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
}
}
}
+ wait_for_odm_update_pending_complete(dc, dc_context);
}
static void commit_planes_for_stream(struct dc *dc,
@@ -3607,6 +3585,8 @@ static void commit_planes_for_stream(struct dc *dc,
// dc->current_state anymore, so we have to cache it before we apply
// the new SubVP context
subvp_prev_use = false;
+ dc_exit_ips_for_hw_access(dc);
+
dc_z10_restore(dc);
if (update_type == UPDATE_TYPE_FULL)
wait_for_outstanding_hw_updates(dc, context);
@@ -3631,7 +3611,7 @@ static void commit_planes_for_stream(struct dc *dc,
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
// Check old context for SubVP
- subvp_prev_use |= (old_pipe->stream && old_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM);
+ subvp_prev_use |= (dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) == SUBVP_PHANTOM);
if (subvp_prev_use)
break;
}
@@ -3639,19 +3619,22 @@ static void commit_planes_for_stream(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
subvp_curr_use = true;
break;
}
}
- if (dc->debug.visual_confirm)
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && pipe->plane_state) {
+ set_p_state_switch_method(dc, context, pipe);
- if (pipe->stream && pipe->plane_state)
- dc_update_viusal_confirm_color(dc, context, pipe);
+ if (dc->debug.visual_confirm)
+ dc_update_visual_confirm_color(dc, context, pipe);
}
+ }
if (stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE) {
struct pipe_ctx *mpcc_pipe;
@@ -3918,7 +3901,9 @@ static void commit_planes_for_stream(struct dc *dc,
* programming has completed (we turn on phantom OTG in order
* to complete the plane disable for phantom pipes).
*/
- dc->hwss.apply_ctx_to_hw(dc, context);
+
+ if (dc->hwss.disable_phantom_streams)
+ dc->hwss.disable_phantom_streams(dc, context);
}
if (update_type != UPDATE_TYPE_FAST)
@@ -4024,7 +4009,7 @@ static bool could_mpcc_tree_change_for_active_pipes(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_NONE) {
subvp_active = true;
break;
}
@@ -4061,7 +4046,7 @@ struct pipe_split_policy_backup {
static void release_minimal_transition_state(struct dc *dc,
struct dc_state *context, struct pipe_split_policy_backup *policy)
{
- dc_release_state(context);
+ dc_state_release(context);
/* restore previous pipe split and odm policy */
if (!dc->config.is_vmin_only_asic)
dc->debug.pipe_split_policy = policy->mpc_policy;
@@ -4072,7 +4057,7 @@ static void release_minimal_transition_state(struct dc *dc,
static struct dc_state *create_minimal_transition_state(struct dc *dc,
struct dc_state *base_context, struct pipe_split_policy_backup *policy)
{
- struct dc_state *minimal_transition_context = dc_create_state(dc);
+ struct dc_state *minimal_transition_context = NULL;
unsigned int i, j;
if (!dc->config.is_vmin_only_asic) {
@@ -4084,7 +4069,9 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc,
policy->subvp_policy = dc->debug.force_disable_subvp;
dc->debug.force_disable_subvp = true;
- dc_resource_state_copy_construct(base_context, minimal_transition_context);
+ minimal_transition_context = dc_state_create_copy(base_context);
+ if (!minimal_transition_context)
+ return NULL;
/* commit minimal state */
if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false)) {
@@ -4116,7 +4103,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
bool success = false;
struct dc_state *minimal_transition_context;
struct pipe_split_policy_backup policy;
- struct mall_temp_config mall_temp_config;
/* commit based on new context */
/* Since all phantom pipes are removed in full validation,
@@ -4125,8 +4111,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
* pipe as subvp/phantom will be cleared (dc copy constructor
* creates a shallow copy).
*/
- if (dc->res_pool->funcs->save_mall_state)
- dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
minimal_transition_context = create_minimal_transition_state(dc,
context, &policy);
if (minimal_transition_context) {
@@ -4139,16 +4123,6 @@ static bool commit_minimal_transition_state_for_windowed_mpo_odm(struct dc *dc,
success = dc_commit_state_no_check(dc, minimal_transition_context) == DC_OK;
}
release_minimal_transition_state(dc, minimal_transition_context, &policy);
- if (dc->res_pool->funcs->restore_mall_state)
- dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
- /* If we do a minimal transition with plane removal and the context
- * has subvp we also have to retain back the phantom stream / planes
- * since the refcount is decremented as part of the min transition
- * (we commit a state with no subvp, so the phantom streams / planes
- * had to be removed).
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, context);
}
if (!success) {
@@ -4216,7 +4190,7 @@ static bool commit_minimal_transition_state(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_PHANTOM) {
subvp_in_use = true;
break;
}
@@ -4403,8 +4377,7 @@ static bool full_update_required(struct dc *dc,
stream_update->mst_bw_update ||
stream_update->func_shaper ||
stream_update->lut3d_func ||
- stream_update->pending_test_pattern ||
- stream_update->crtc_timing_adjust))
+ stream_update->pending_test_pattern))
return true;
if (stream) {
@@ -4484,7 +4457,6 @@ bool dc_update_planes_and_stream(struct dc *dc,
struct dc_state *context;
enum surface_update_type update_type;
int i;
- struct mall_temp_config mall_temp_config;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
/* In cases where MPO and split or ODM are used transitions can
@@ -4495,6 +4467,8 @@ bool dc_update_planes_and_stream(struct dc *dc,
bool is_plane_addition = 0;
bool is_fast_update_only;
+ dc_exit_ips_for_hw_access(dc);
+
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
is_fast_update_only = fast_update_only(dc, fast_update, srf_updates,
surface_count, stream_update, stream);
@@ -4528,23 +4502,10 @@ bool dc_update_planes_and_stream(struct dc *dc,
* pipe as subvp/phantom will be cleared (dc copy constructor
* creates a shallow copy).
*/
- if (dc->res_pool->funcs->save_mall_state)
- dc->res_pool->funcs->save_mall_state(dc, context, &mall_temp_config);
if (!commit_minimal_transition_state(dc, context)) {
- dc_release_state(context);
+ dc_state_release(context);
return false;
}
- if (dc->res_pool->funcs->restore_mall_state)
- dc->res_pool->funcs->restore_mall_state(dc, context, &mall_temp_config);
-
- /* If we do a minimal transition with plane removal and the context
- * has subvp we also have to retain back the phantom stream / planes
- * since the refcount is decremented as part of the min transition
- * (we commit a state with no subvp, so the phantom streams / planes
- * had to be removed).
- */
- if (dc->res_pool->funcs->retain_phantom_pipes)
- dc->res_pool->funcs->retain_phantom_pipes(dc, context);
update_type = UPDATE_TYPE_FULL;
}
@@ -4601,7 +4562,7 @@ bool dc_update_planes_and_stream(struct dc *dc,
struct dc_state *old = dc->current_state;
dc->current_state = context;
- dc_release_state(old);
+ dc_state_release(old);
// clear any forced full updates
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -4628,6 +4589,8 @@ void dc_commit_updates_for_stream(struct dc *dc,
int i, j;
struct dc_fast_update fast_update[MAX_SURFACES] = {0};
+ dc_exit_ips_for_hw_access(dc);
+
populate_fast_updates(fast_update, srf_updates, surface_count, stream_update);
stream_status = dc_stream_get_status(stream);
context = dc->current_state;
@@ -4660,14 +4623,12 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
/* initialize scratch memory for building context */
- context = dc_create_state(dc);
+ context = dc_state_create_copy(state);
if (context == NULL) {
DC_ERROR("Failed to allocate new validate context!\n");
return;
}
- dc_resource_state_copy_construct(state, context);
-
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
@@ -4706,7 +4667,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (update_type >= UPDATE_TYPE_FULL) {
if (!dc->res_pool->funcs->validate_bandwidth(dc, context, false)) {
DC_ERROR("Mode validation failed for stream update!\n");
- dc_release_state(context);
+ dc_state_release(context);
return;
}
}
@@ -4739,7 +4700,7 @@ void dc_commit_updates_for_stream(struct dc *dc,
struct dc_state *old = dc->current_state;
dc->current_state = context;
- dc_release_state(old);
+ dc_state_release(old);
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -4812,7 +4773,9 @@ void dc_set_power_state(
switch (power_state) {
case DC_ACPI_CM_POWER_STATE_D0:
- dc_resource_state_construct(dc, dc->current_state);
+ dc_state_construct(dc, dc->current_state);
+
+ dc_exit_ips_for_hw_access(dc);
dc_z10_restore(dc);
@@ -4827,7 +4790,7 @@ void dc_set_power_state(
default:
ASSERT(dc->current_state->stream_count == 0);
- dc_resource_state_destruct(dc->current_state);
+ dc_state_destruct(dc->current_state);
break;
}
@@ -4904,6 +4867,38 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
return true;
}
+/* enable/disable eDP Replay without specifying a stream for eDP */
+bool dc_set_replay_allow_active(struct dc *dc, bool active)
+{
+ int i;
+ bool allow_active;
+
+ for (i = 0; i < dc->current_state->stream_count; i++) {
+ struct dc_link *link;
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ link = stream->link;
+ if (!link)
+ continue;
+
+ if (link->replay_settings.replay_feature_enabled) {
+ if (active && !link->replay_settings.replay_allow_active) {
+ allow_active = true;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ false, false, NULL))
+ return false;
+ } else if (!active && link->replay_settings.replay_allow_active) {
+ allow_active = false;
+ if (!dc_link_set_replay_allow_active(link, &allow_active,
+ true, false, NULL))
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
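A minimal usage sketch for the new helper; the wrapper below is hypothetical and only illustrates that a single call toggles Replay on all capable eDP links:

	static void example_set_replay(struct dc *dc, bool active)
	{
		if (!dc_set_replay_allow_active(dc, active))
			DC_LOG_WARNING("failed to update Replay allow_active state\n");
	}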
void dc_allow_idle_optimizations(struct dc *dc, bool allow)
{
if (dc->debug.disable_idle_power_optimizations)
@@ -4923,6 +4918,12 @@ void dc_allow_idle_optimizations(struct dc *dc, bool allow)
dc->idle_optimizations_allowed = allow;
}
+void dc_exit_ips_for_hw_access(struct dc *dc)
+{
+ if (dc->caps.ips_support)
+ dc_allow_idle_optimizations(dc, false);
+}
+
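dc_exit_ips_for_hw_access() is added in front of the register-touching paths throughout this patch: on ASICs with IPS support the display hardware may be powered down while idle, so idle optimizations must be disallowed before any hardware access. A sketch of the intended pattern (the caller below is hypothetical):

	static void example_hw_access(struct dc *dc)
	{
		dc_exit_ips_for_hw_access(dc);	/* no-op unless dc->caps.ips_support */
		dc_z10_restore(dc);
		/* ... program registers / commit the new state ... */
	}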
bool dc_dmub_is_ips_idle_state(struct dc *dc)
{
if (dc->debug.disable_idle_power_optimizations)
@@ -5443,6 +5444,8 @@ bool dc_abm_save_restore(
struct dc_link *link = stream->sink->link;
struct dc_link *edp_links[MAX_NUM_EDP];
+ if (link->replay_settings.replay_feature_enabled)
+ return false;
/*find primary pipe associated with stream*/
for (i = 0; i < MAX_PIPES; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
index fc18b9dc9..9c05b1a07 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c
@@ -31,6 +31,7 @@
#include "basics/dc_common.h"
#include "resource.h"
#include "dc_dmub_srv.h"
+#include "dc_state_priv.h"
#define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
@@ -425,45 +426,130 @@ void get_hdr_visual_confirm_color(
}
void get_subvp_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
{
uint32_t color_value = MAX_TG_COLOR_VALUE;
- bool enable_subvp = false;
- int i;
-
- if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !context)
- return;
+ if (pipe_ctx) {
+ switch (pipe_ctx->p_state_type) {
+ case P_STATE_SUB_VP:
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_DRR_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_V_BLANK_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ default:
+ break;
+ }
+ }
+}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+void get_mclk_switch_visual_confirm_color(
+ struct pipe_ctx *pipe_ctx,
+ struct tg_color *color)
+{
+ uint32_t color_value = MAX_TG_COLOR_VALUE;
- if (pipe->stream && pipe->stream->mall_stream_config.paired_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- /* SubVP enable - red */
- color->color_g_y = 0;
+ if (pipe_ctx) {
+ switch (pipe_ctx->p_state_type) {
+ case P_STATE_V_BLANK:
+ color->color_r_cr = color_value;
+ color->color_g_y = color_value;
color->color_b_cb = 0;
+ break;
+ case P_STATE_FPO:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = color_value;
+ break;
+ case P_STATE_V_ACTIVE:
color->color_r_cr = color_value;
- enable_subvp = true;
-
- if (pipe_ctx->stream == pipe->stream)
- return;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ case P_STATE_SUB_VP:
+ color->color_r_cr = color_value;
+ color->color_g_y = 0;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_DRR_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = color_value;
+ color->color_b_cb = 0;
+ break;
+ case P_STATE_V_BLANK_SUB_VP:
+ color->color_r_cr = 0;
+ color->color_g_y = 0;
+ color->color_b_cb = color_value;
+ break;
+ default:
break;
}
}
+}
- if (enable_subvp && pipe_ctx->stream->mall_stream_config.type == SUBVP_NONE) {
- color->color_r_cr = 0;
- if (pipe_ctx->stream->allow_freesync == 1) {
- /* SubVP enable and DRR on - green */
- color->color_b_cb = 0;
- color->color_g_y = color_value;
+void set_p_state_switch_method(
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
+ bool enable_subvp;
+
+ if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
+ return;
+
+ if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
+ dm_dram_clock_change_unsupported) {
+ /* MCLK switching is supported */
+ if (!pipe_ctx->has_vactive_margin) {
+ /* In Vblank - yellow */
+ pipe_ctx->p_state_type = P_STATE_V_BLANK;
+
+ if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
+ /* FPO + Vblank - cyan */
+ pipe_ctx->p_state_type = P_STATE_FPO;
+ }
} else {
- /* SubVP enable and No DRR - blue */
- color->color_g_y = 0;
- color->color_b_cb = color_value;
+ /* In Vactive - pink */
+ pipe_ctx->p_state_type = P_STATE_V_ACTIVE;
+ }
+
+ /* SubVP */
+ enable_subvp = false;
+
+ for (int i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream && dc_state_get_paired_subvp_stream(context, pipe->stream) &&
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ /* SubVP enable - red */
+ pipe_ctx->p_state_type = P_STATE_SUB_VP;
+ enable_subvp = true;
+
+ if (pipe_ctx->stream == pipe->stream)
+ return;
+ break;
+ }
+ }
+
+ if (enable_subvp && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_NONE) {
+ if (pipe_ctx->stream->allow_freesync == 1) {
+ /* SubVP enable and DRR on - green */
+ pipe_ctx->p_state_type = P_STATE_DRR_SUB_VP;
+ } else {
+ /* SubVP enable and No DRR - blue */
+ pipe_ctx->p_state_type = P_STATE_V_BLANK_SUB_VP;
+ }
}
}
}
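With this split, a caller first classifies the pipe and only then, if visual confirm is enabled, translates the classification into a debug colour; the updated call sites in dc.c follow roughly this shape (illustrative sketch):

	if (pipe_ctx->stream && pipe_ctx->plane_state) {
		set_p_state_switch_method(dc, context, pipe_ctx);	/* fills pipe_ctx->p_state_type */
		if (dc->debug.visual_confirm == VISUAL_CONFIRM_MCLK_SWITCH)
			get_mclk_switch_visual_confirm_color(pipe_ctx,
					&pipe_ctx->visual_confirm_color);
	}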
@@ -473,7 +559,8 @@ void hwss_build_fast_sequence(struct dc *dc,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
int *num_steps,
- struct pipe_ctx *pipe_ctx)
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_status *stream_status)
{
struct dc_plane_state *plane = pipe_ctx->plane_state;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -490,7 +577,8 @@ void hwss_build_fast_sequence(struct dc *dc,
if (dc->hwss.subvp_pipe_control_lock_fast) {
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = true;
- block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+ plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -529,7 +617,7 @@ void hwss_build_fast_sequence(struct dc *dc,
}
if (dc->hwss.update_plane_addr && current_mpc_pipe->plane_state->update_flags.bits.addr_update) {
if (resource_is_pipe_type(current_mpc_pipe, OTG_MASTER) &&
- current_mpc_pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ stream_status->mall_stream_config.type == SUBVP_MAIN) {
block_sequence[*num_steps].params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
block_sequence[*num_steps].params.subvp_save_surf_addr.addr = &current_mpc_pipe->plane_state->address;
block_sequence[*num_steps].params.subvp_save_surf_addr.subvp_index = current_mpc_pipe->subvp_index;
@@ -612,7 +700,8 @@ void hwss_build_fast_sequence(struct dc *dc,
if (dc->hwss.subvp_pipe_control_lock_fast) {
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.dc = dc;
block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.lock = false;
- block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.pipe_ctx = pipe_ctx;
+ block_sequence[*num_steps].params.subvp_pipe_control_lock_fast_params.subvp_immediate_flip =
+ plane->flip_immediate && stream_status->mall_stream_config.type == SUBVP_MAIN;
block_sequence[*num_steps].func = DMUB_SUBVP_PIPE_CONTROL_LOCK_FAST;
(*num_steps)++;
}
@@ -812,42 +901,6 @@ void hwss_subvp_save_surf_addr(union block_sequence_params *params)
dc_dmub_srv_subvp_save_surf_addr(dc_dmub_srv, addr, subvp_index);
}
-void get_mclk_switch_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
- struct pipe_ctx *pipe_ctx,
- struct tg_color *color)
-{
- uint32_t color_value = MAX_TG_COLOR_VALUE;
- struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
-
- if (!dc->ctx || !dc->ctx->dmub_srv || !pipe_ctx || !vba || !context)
- return;
-
- if (vba->DRAMClockChangeSupport[vba->VoltageLevel][vba->maxMpcComb] !=
- dm_dram_clock_change_unsupported) {
- /* MCLK switching is supported */
- if (!pipe_ctx->has_vactive_margin) {
- /* In Vblank - yellow */
- color->color_r_cr = color_value;
- color->color_g_y = color_value;
-
- if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
- /* FPO + Vblank - cyan */
- color->color_r_cr = 0;
- color->color_g_y = color_value;
- color->color_b_cb = color_value;
- }
- } else {
- /* In Vactive - pink */
- color->color_r_cr = color_value;
- color->color_b_cb = color_value;
- }
- /* SubVP */
- get_subvp_visual_confirm_color(dc, context, pipe_ctx, color);
- }
-}
-
void get_surface_tile_visual_confirm_color(
struct pipe_ctx *pipe_ctx,
struct tg_color *color)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
index f365773d5..c6c35037b 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c
@@ -467,6 +467,13 @@ bool dc_link_setup_psr(struct dc_link *link,
return link->dc->link_srv->edp_setup_psr(link, stream, psr_config, psr_context);
}
+bool dc_link_set_replay_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
+{
+ return link->dc->link_srv->edp_set_replay_allow_active(link, allow_active, wait,
+ force_static, power_opts);
+}
+
bool dc_link_get_replay_state(const struct dc_link *link, uint64_t *state)
{
return link->dc->link_srv->edp_get_replay_state(link, state);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 990d775e4..9fbdb0969 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -42,6 +42,7 @@
#include "link_enc_cfg.h"
#include "link.h"
#include "clk_mgr.h"
+#include "dc_state_priv.h"
#include "virtual/virtual_link_hwss.h"
#include "link/hwss/link_hwss_dio.h"
#include "link/hwss/link_hwss_dpia.h"
@@ -69,8 +70,8 @@
#include "dcn314/dcn314_resource.h"
#include "dcn315/dcn315_resource.h"
#include "dcn316/dcn316_resource.h"
-#include "../dcn32/dcn32_resource.h"
-#include "../dcn321/dcn321_resource.h"
+#include "dcn32/dcn32_resource.h"
+#include "dcn321/dcn321_resource.h"
#include "dcn35/dcn35_resource.h"
#define VISUAL_CONFIRM_BASE_DEFAULT 3
@@ -1764,6 +1765,29 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx(
return free_pipe_idx;
}
+int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool)
+{
+ int free_pipe_idx = FREE_PIPE_INDEX_NOT_FOUND;
+ const struct pipe_ctx *new_pipe, *cur_pipe;
+ int i;
+
+ for (i = 0; i < pool->pipe_count; i++) {
+ cur_pipe = &cur_res_ctx->pipe_ctx[i];
+ new_pipe = &new_res_ctx->pipe_ctx[i];
+
+ if (resource_is_pipe_type(cur_pipe, OTG_MASTER) &&
+ resource_is_pipe_type(new_pipe, FREE_PIPE)) {
+ free_pipe_idx = i;
+ break;
+ }
+ }
+
+ return free_pipe_idx;
+}
+
int resource_find_free_pipe_used_as_cur_sec_dpp_in_mpcc_combine(
const struct resource_context *cur_res_ctx,
struct resource_context *new_res_ctx,
@@ -2440,6 +2464,9 @@ void resource_remove_otg_master_for_stream_output(struct dc_state *context,
struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(
&context->res_ctx, stream);
+ if (!otg_master)
+ return;
+
ASSERT(resource_get_odm_slice_count(otg_master) == 1);
ASSERT(otg_master->plane_state == NULL);
ASSERT(otg_master->stream_res.stream_enc);
@@ -2974,190 +3001,6 @@ bool resource_update_pipes_for_plane_with_slice_count(
return result;
}
-bool dc_add_plane_to_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context)
-{
- struct resource_pool *pool = dc->res_pool;
- struct pipe_ctx *otg_master_pipe;
- struct dc_stream_status *stream_status = NULL;
- bool added = false;
-
- stream_status = dc_stream_get_status_from_state(context, stream);
- if (stream_status == NULL) {
- dm_error("Existing stream not found; failed to attach surface!\n");
- goto out;
- } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
- dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
- plane_state, MAX_SURFACE_NUM);
- goto out;
- }
-
- otg_master_pipe = resource_get_otg_master_for_stream(
- &context->res_ctx, stream);
- if (otg_master_pipe)
- added = resource_append_dpp_pipes_for_plane_composition(context,
- dc->current_state, pool, otg_master_pipe, plane_state);
-
- if (added) {
- stream_status->plane_states[stream_status->plane_count] =
- plane_state;
- stream_status->plane_count++;
- dc_plane_state_retain(plane_state);
- }
-
-out:
- return added;
-}
-
-bool dc_remove_plane_from_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context)
-{
- int i;
- struct dc_stream_status *stream_status = NULL;
- struct resource_pool *pool = dc->res_pool;
-
- if (!plane_state)
- return true;
-
- for (i = 0; i < context->stream_count; i++)
- if (context->streams[i] == stream) {
- stream_status = &context->stream_status[i];
- break;
- }
-
- if (stream_status == NULL) {
- dm_error("Existing stream not found; failed to remove plane.\n");
- return false;
- }
-
- resource_remove_dpp_pipes_for_plane_composition(
- context, pool, plane_state);
-
- for (i = 0; i < stream_status->plane_count; i++) {
- if (stream_status->plane_states[i] == plane_state) {
- dc_plane_state_release(stream_status->plane_states[i]);
- break;
- }
- }
-
- if (i == stream_status->plane_count) {
- dm_error("Existing plane_state not found; failed to detach it!\n");
- return false;
- }
-
- stream_status->plane_count--;
-
- /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
- for (; i < stream_status->plane_count; i++)
- stream_status->plane_states[i] = stream_status->plane_states[i + 1];
-
- stream_status->plane_states[stream_status->plane_count] = NULL;
-
- if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
- /* ODM combine could prevent us from supporting more planes
- * we will reset ODM slice count back to 1 when all planes have
- * been removed to maximize the amount of planes supported when
- * new planes are added.
- */
- resource_update_pipes_for_stream_with_slice_count(
- context, dc->current_state, dc->res_pool, stream, 1);
-
- return true;
-}
-
-/**
- * dc_rem_all_planes_for_stream - Remove planes attached to the target stream.
- *
- * @dc: Current dc state.
- * @stream: Target stream, which we want to remove the attached plans.
- * @context: New context.
- *
- * Return:
- * Return true if DC was able to remove all planes from the target
- * stream, otherwise, return false.
- */
-bool dc_rem_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_state *context)
-{
- int i, old_plane_count;
- struct dc_stream_status *stream_status = NULL;
- struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
-
- for (i = 0; i < context->stream_count; i++)
- if (context->streams[i] == stream) {
- stream_status = &context->stream_status[i];
- break;
- }
-
- if (stream_status == NULL) {
- dm_error("Existing stream %p not found!\n", stream);
- return false;
- }
-
- old_plane_count = stream_status->plane_count;
-
- for (i = 0; i < old_plane_count; i++)
- del_planes[i] = stream_status->plane_states[i];
-
- for (i = 0; i < old_plane_count; i++)
- if (!dc_remove_plane_from_context(dc, stream, del_planes[i], context))
- return false;
-
- return true;
-}
-
-static bool add_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- const struct dc_validation_set set[],
- int set_count,
- struct dc_state *context)
-{
- int i, j;
-
- for (i = 0; i < set_count; i++)
- if (set[i].stream == stream)
- break;
-
- if (i == set_count) {
- dm_error("Stream %p not found in set!\n", stream);
- return false;
- }
-
- for (j = 0; j < set[i].plane_count; j++)
- if (!dc_add_plane_to_context(dc, stream, set[i].plane_states[j], context))
- return false;
-
- return true;
-}
-
-bool dc_add_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state * const *plane_states,
- int plane_count,
- struct dc_state *context)
-{
- struct dc_validation_set set;
- int i;
-
- set.stream = stream;
- set.plane_count = plane_count;
-
- for (i = 0; i < plane_count; i++)
- set.plane_states[i] = plane_states[i];
-
- return add_all_planes_for_stream(dc, stream, &set, 1, context);
-}
-
bool dc_is_timing_changed(struct dc_stream_state *cur_stream,
struct dc_stream_state *new_stream)
{
@@ -3309,84 +3152,6 @@ static struct audio *find_first_free_audio(
return NULL;
}
-/*
- * dc_add_stream_to_ctx() - Add a new dc_stream_state to a dc_state.
- */
-enum dc_status dc_add_stream_to_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream)
-{
- enum dc_status res;
- DC_LOGGER_INIT(dc->ctx->logger);
-
- if (new_ctx->stream_count >= dc->res_pool->timing_generator_count) {
- DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
- return DC_ERROR_UNEXPECTED;
- }
-
- new_ctx->streams[new_ctx->stream_count] = stream;
- dc_stream_retain(stream);
- new_ctx->stream_count++;
-
- res = resource_add_otg_master_for_stream_output(
- new_ctx, dc->res_pool, stream);
- if (res != DC_OK)
- DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
-
- return res;
-}
-
-/*
- * dc_remove_stream_from_ctx() - Remove a stream from a dc_state.
- */
-enum dc_status dc_remove_stream_from_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream)
-{
- int i;
- struct dc_context *dc_ctx = dc->ctx;
- struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
- &new_ctx->res_ctx, stream);
-
- if (!del_pipe) {
- DC_ERROR("Pipe not found for stream %p !\n", stream);
- return DC_ERROR_UNEXPECTED;
- }
-
- resource_update_pipes_for_stream_with_slice_count(new_ctx,
- dc->current_state, dc->res_pool, stream, 1);
- resource_remove_otg_master_for_stream_output(
- new_ctx, dc->res_pool, stream);
-
- for (i = 0; i < new_ctx->stream_count; i++)
- if (new_ctx->streams[i] == stream)
- break;
-
- if (new_ctx->streams[i] != stream) {
- DC_ERROR("Context doesn't have stream %p !\n", stream);
- return DC_ERROR_UNEXPECTED;
- }
-
- dc_stream_release(new_ctx->streams[i]);
- new_ctx->stream_count--;
-
- /* Trim back arrays */
- for (; i < new_ctx->stream_count; i++) {
- new_ctx->streams[i] = new_ctx->streams[i + 1];
- new_ctx->stream_status[i] = new_ctx->stream_status[i + 1];
- }
-
- new_ctx->streams[new_ctx->stream_count] = NULL;
- memset(
- &new_ctx->stream_status[new_ctx->stream_count],
- 0,
- sizeof(new_ctx->stream_status[0]));
-
- return DC_OK;
-}
-
static struct dc_stream_state *find_pll_sharable_stream(
struct dc_stream_state *stream_needs_pll,
struct dc_state *context)
@@ -3594,6 +3359,7 @@ static void mark_seamless_boot_stream(
* |________|_______________|___________|_____________|
*/
static bool acquire_otg_master_pipe_for_stream(
+ const struct dc_state *cur_ctx,
struct dc_state *new_ctx,
const struct resource_pool *pool,
struct dc_stream_state *stream)
@@ -3607,7 +3373,22 @@ static bool acquire_otg_master_pipe_for_stream(
int pipe_idx;
struct pipe_ctx *pipe_ctx = NULL;
- pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
+ /*
+ * Upper level code is responsible for optimizing out unnecessary
+ * addition and removal of unchanged streams, so an unchanged stream
+ * keeps the same OTG master instance allocated. When a current stream
+ * is removed and a new stream is added, we prefer to reuse the OTG
+ * instance made available by the removed stream. If none is found, we
+ * try to avoid free pipes already used in the current context, as
+ * reusing them could needlessly tear down an existing ODM/MPC/MPO
+ * configuration.
+ */
+ pipe_idx = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ pipe_idx = recource_find_free_pipe_not_used_in_cur_res_ctx(
+ &cur_ctx->res_ctx, &new_ctx->res_ctx, pool);
+ if (pipe_idx == FREE_PIPE_INDEX_NOT_FOUND)
+ pipe_idx = resource_find_any_free_pipe(&new_ctx->res_ctx, pool);
if (pipe_idx != FREE_PIPE_INDEX_NOT_FOUND) {
pipe_ctx = &new_ctx->res_ctx.pipe_ctx[pipe_idx];
memset(pipe_ctx, 0, sizeof(*pipe_ctx));
@@ -3667,7 +3448,7 @@ enum dc_status resource_map_pool_resources(
if (!acquired)
/* acquire new resources */
- acquired = acquire_otg_master_pipe_for_stream(
+ acquired = acquire_otg_master_pipe_for_stream(dc->current_state,
context, pool, stream);
pipe_ctx = resource_get_otg_master_for_stream(&context->res_ctx, stream);
@@ -3750,35 +3531,6 @@ enum dc_status resource_map_pool_resources(
return DC_ERROR_UNEXPECTED;
}
-/**
- * dc_resource_state_copy_construct_current() - Creates a new dc_state from existing state
- *
- * @dc: copy out of dc->current_state
- * @dst_ctx: copy into this
- *
- * This function makes a shallow copy of the current DC state and increments
- * refcounts on existing streams and planes.
- */
-void dc_resource_state_copy_construct_current(
- const struct dc *dc,
- struct dc_state *dst_ctx)
-{
- dc_resource_state_copy_construct(dc->current_state, dst_ctx);
-}
-
-
-void dc_resource_state_construct(
- const struct dc *dc,
- struct dc_state *dst_ctx)
-{
- dst_ctx->clk_mgr = dc->clk_mgr;
-
- /* Initialise DIG link encoder resource tracking variables. */
- if (dc->res_pool)
- link_enc_cfg_init(dc, dst_ctx);
-}
-
-
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc)
{
if (dc->res_pool == NULL)
@@ -3822,6 +3574,31 @@ static bool planes_changed_for_existing_stream(struct dc_state *context,
return false;
}
+static bool add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ const struct dc_validation_set set[],
+ int set_count,
+ struct dc_state *state)
+{
+ int i, j;
+
+ for (i = 0; i < set_count; i++)
+ if (set[i].stream == stream)
+ break;
+
+ if (i == set_count) {
+ dm_error("Stream %p not found in set!\n", stream);
+ return false;
+ }
+
+ for (j = 0; j < set[i].plane_count; j++)
+ if (!dc_state_add_plane(dc, stream, set[i].plane_states[j], state))
+ return false;
+
+ return true;
+}
+
/**
* dc_validate_with_context - Validate and update the potential new stream in the context object
*
@@ -3927,7 +3704,8 @@ enum dc_status dc_validate_with_context(struct dc *dc,
unchanged_streams[i],
set,
set_count)) {
- if (!dc_rem_all_planes_for_stream(dc,
+
+ if (!dc_state_rem_all_planes_for_stream(dc,
unchanged_streams[i],
context)) {
res = DC_FAIL_DETACH_SURFACES;
@@ -3949,12 +3727,24 @@ enum dc_status dc_validate_with_context(struct dc *dc,
}
}
- if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
- res = DC_FAIL_DETACH_SURFACES;
- goto fail;
+ if (dc_state_get_stream_subvp_type(context, del_streams[i]) == SUBVP_PHANTOM) {
+ /* remove phantoms specifically */
+ if (!dc_state_rem_all_phantom_planes_for_stream(dc, del_streams[i], context, true)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_state_remove_phantom_stream(dc, context, del_streams[i]);
+ dc_state_release_phantom_stream(dc, context, del_streams[i]);
+ } else {
+ if (!dc_state_rem_all_planes_for_stream(dc, del_streams[i], context)) {
+ res = DC_FAIL_DETACH_SURFACES;
+ goto fail;
+ }
+
+ res = dc_state_remove_stream(dc, context, del_streams[i]);
}
- res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
if (res != DC_OK)
goto fail;
}
@@ -3977,7 +3767,7 @@ enum dc_status dc_validate_with_context(struct dc *dc,
/* Add new streams and then add all planes for the new stream */
for (i = 0; i < add_streams_count; i++) {
calculate_phy_pix_clks(add_streams[i]);
- res = dc_add_stream_to_ctx(dc, context, add_streams[i]);
+ res = dc_state_add_stream(dc, context, add_streams[i]);
if (res != DC_OK)
goto fail;
@@ -4483,84 +4273,6 @@ static void set_vtem_info_packet(
*info_packet = stream->vtem_infopacket;
}
-void dc_resource_state_destruct(struct dc_state *context)
-{
- int i, j;
-
- for (i = 0; i < context->stream_count; i++) {
- for (j = 0; j < context->stream_status[i].plane_count; j++)
- dc_plane_state_release(
- context->stream_status[i].plane_states[j]);
-
- context->stream_status[i].plane_count = 0;
- dc_stream_release(context->streams[i]);
- context->streams[i] = NULL;
- }
- context->stream_count = 0;
- context->stream_mask = 0;
- memset(&context->res_ctx, 0, sizeof(context->res_ctx));
- memset(&context->pp_display_cfg, 0, sizeof(context->pp_display_cfg));
- memset(&context->dcn_bw_vars, 0, sizeof(context->dcn_bw_vars));
- context->clk_mgr = NULL;
- memset(&context->bw_ctx.bw, 0, sizeof(context->bw_ctx.bw));
- memset(context->block_sequence, 0, sizeof(context->block_sequence));
- context->block_sequence_steps = 0;
- memset(context->dc_dmub_cmd, 0, sizeof(context->dc_dmub_cmd));
- context->dmub_cmd_count = 0;
- memset(&context->perf_params, 0, sizeof(context->perf_params));
- memset(&context->scratch, 0, sizeof(context->scratch));
-}
-
-void dc_resource_state_copy_construct(
- const struct dc_state *src_ctx,
- struct dc_state *dst_ctx)
-{
- int i, j;
- struct kref refcount = dst_ctx->refcount;
-#ifdef CONFIG_DRM_AMD_DC_FP
- struct dml2_context *dml2 = NULL;
-
- // Need to preserve allocated dml2 context
- if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
- dml2 = dst_ctx->bw_ctx.dml2;
-#endif
-
- *dst_ctx = *src_ctx;
-
-#ifdef CONFIG_DRM_AMD_DC_FP
- // Preserve allocated dml2 context
- if (src_ctx->clk_mgr && src_ctx->clk_mgr->ctx->dc->debug.using_dml2)
- dst_ctx->bw_ctx.dml2 = dml2;
-#endif
-
- for (i = 0; i < MAX_PIPES; i++) {
- struct pipe_ctx *cur_pipe = &dst_ctx->res_ctx.pipe_ctx[i];
-
- if (cur_pipe->top_pipe)
- cur_pipe->top_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
-
- if (cur_pipe->bottom_pipe)
- cur_pipe->bottom_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
-
- if (cur_pipe->next_odm_pipe)
- cur_pipe->next_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
-
- if (cur_pipe->prev_odm_pipe)
- cur_pipe->prev_odm_pipe = &dst_ctx->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
- }
-
- for (i = 0; i < dst_ctx->stream_count; i++) {
- dc_stream_retain(dst_ctx->streams[i]);
- for (j = 0; j < dst_ctx->stream_status[i].plane_count; j++)
- dc_plane_state_retain(
- dst_ctx->stream_status[i].plane_states[j]);
- }
-
- /* context refcount should not be overridden */
- dst_ctx->refcount = refcount;
-
-}
-
struct clock_source *dc_resource_find_first_free_pll(
struct resource_context *res_ctx,
const struct resource_pool *pool)
@@ -4740,7 +4452,7 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
option = DITHER_OPTION_SPATIAL8;
break;
case COLOR_DEPTH_101010:
- option = DITHER_OPTION_SPATIAL10;
+ option = DITHER_OPTION_TRUN10;
break;
default:
option = DITHER_OPTION_DISABLE;
@@ -4766,6 +4478,8 @@ void resource_build_bit_depth_reduction_params(struct dc_stream_state *stream,
option == DITHER_OPTION_TRUN10_SPATIAL8_FM6) {
fmt_bit_depth->flags.TRUNCATE_ENABLED = 1;
fmt_bit_depth->flags.TRUNCATE_DEPTH = 2;
+ if (option == DITHER_OPTION_TRUN10)
+ fmt_bit_depth->flags.TRUNCATE_MODE = 1;
}
/* special case - Formatter can only reduce by 4 bits at most.
@@ -5283,7 +4997,7 @@ bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_st
if (dc->current_state->stream_count == 1 && stream->timing.v_addressable >= 2880 &&
((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
return true;
- else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 2160 &&
+ else if (dc->current_state->stream_count > 1 && stream->timing.v_addressable >= 1080 &&
((stream->timing.pix_clk_100hz * 100) / stream->timing.v_total / stream->timing.h_total) < 120)
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
new file mode 100644
index 000000000..61986e5cb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -0,0 +1,880 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "core_types.h"
+#include "core_status.h"
+#include "dc_state.h"
+#include "dc_state_priv.h"
+#include "dc_stream_priv.h"
+#include "dc_plane_priv.h"
+
+#include "dm_services.h"
+#include "resource.h"
+#include "link_enc_cfg.h"
+
+#include "dml2/dml2_wrapper.h"
+#include "dml2/dml2_internal_types.h"
+
+#define DC_LOGGER \
+ dc->ctx->logger
+#define DC_LOGGER_INIT(logger)
+
+/* Private dc_state helper functions */
+static bool dc_state_track_phantom_stream(struct dc_state *state,
+ struct dc_stream_state *phantom_stream)
+{
+ if (state->phantom_stream_count >= MAX_PHANTOM_PIPES)
+ return false;
+
+ state->phantom_streams[state->phantom_stream_count++] = phantom_stream;
+
+ return true;
+}
+
+static bool dc_state_untrack_phantom_stream(struct dc_state *state, struct dc_stream_state *phantom_stream)
+{
+ bool res = false;
+ int i;
+
+ /* first find phantom stream in the dc_state */
+ for (i = 0; i < state->phantom_stream_count; i++) {
+ if (state->phantom_streams[i] == phantom_stream) {
+ state->phantom_streams[i] = NULL;
+ res = true;
+ break;
+ }
+ }
+
+ /* failed to find stream in state */
+ if (!res)
+ return res;
+
+ /* trim back phantom streams */
+ state->phantom_stream_count--;
+ for (; i < state->phantom_stream_count; i++)
+ state->phantom_streams[i] = state->phantom_streams[i + 1];
+
+ return res;
+}
+
+static bool dc_state_is_phantom_stream_tracked(struct dc_state *state, struct dc_stream_state *phantom_stream)
+{
+ int i;
+
+ for (i = 0; i < state->phantom_stream_count; i++) {
+ if (state->phantom_streams[i] == phantom_stream)
+ return true;
+ }
+
+ return false;
+}
+
+static bool dc_state_track_phantom_plane(struct dc_state *state,
+ struct dc_plane_state *phantom_plane)
+{
+ if (state->phantom_plane_count >= MAX_PHANTOM_PIPES)
+ return false;
+
+ state->phantom_planes[state->phantom_plane_count++] = phantom_plane;
+
+ return true;
+}
+
+static bool dc_state_untrack_phantom_plane(struct dc_state *state, struct dc_plane_state *phantom_plane)
+{
+ bool res = false;
+ int i;
+
+ /* first find phantom plane in the dc_state */
+ for (i = 0; i < state->phantom_plane_count; i++) {
+ if (state->phantom_planes[i] == phantom_plane) {
+ state->phantom_planes[i] = NULL;
+ res = true;
+ break;
+ }
+ }
+
+ /* failed to find plane in state */
+ if (!res)
+ return res;
+
+ /* trim back phantom planes */
+ state->phantom_plane_count--;
+ for (; i < state->phantom_plane_count; i++)
+ state->phantom_planes[i] = state->phantom_planes[i + 1];
+
+ return res;
+}
+
+static bool dc_state_is_phantom_plane_tracked(struct dc_state *state, struct dc_plane_state *phantom_plane)
+{
+ int i;
+
+ for (i = 0; i < state->phantom_plane_count; i++) {
+ if (state->phantom_planes[i] == phantom_plane)
+ return true;
+ }
+
+ return false;
+}
+
+static void dc_state_copy_internal(struct dc_state *dst_state, struct dc_state *src_state)
+{
+ int i, j;
+
+ memcpy(dst_state, src_state, sizeof(struct dc_state));
+
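+ /* The shallow copy above duplicates the pipe_ctx array, so the
+ * top/bottom and ODM pipe pointers still point into the source state;
+ * re-point them at the matching entries in the destination array.
+ */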
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct pipe_ctx *cur_pipe = &dst_state->res_ctx.pipe_ctx[i];
+
+ if (cur_pipe->top_pipe)
+ cur_pipe->top_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+
+ if (cur_pipe->bottom_pipe)
+ cur_pipe->bottom_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+
+ if (cur_pipe->prev_odm_pipe)
+ cur_pipe->prev_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->prev_odm_pipe->pipe_idx];
+
+ if (cur_pipe->next_odm_pipe)
+ cur_pipe->next_odm_pipe = &dst_state->res_ctx.pipe_ctx[cur_pipe->next_odm_pipe->pipe_idx];
+ }
+
+ /* retain phantoms */
+ for (i = 0; i < dst_state->phantom_stream_count; i++)
+ dc_stream_retain(dst_state->phantom_streams[i]);
+
+ for (i = 0; i < dst_state->phantom_plane_count; i++)
+ dc_plane_state_retain(dst_state->phantom_planes[i]);
+
+ /* retain streams and planes */
+ for (i = 0; i < dst_state->stream_count; i++) {
+ dc_stream_retain(dst_state->streams[i]);
+ for (j = 0; j < dst_state->stream_status[i].plane_count; j++)
+ dc_plane_state_retain(
+ dst_state->stream_status[i].plane_states[j]);
+ }
+
+}
+
+static void init_state(struct dc *dc, struct dc_state *state)
+{
+ /* Each context must have its own instance of VBA, and in order to
+ * initialize and obtain IP and SOC, the base DML instance from DC is
+ * initially copied into every context.
+ */
+ memcpy(&state->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
+}
+
+/* Public dc_state functions */
+struct dc_state *dc_state_create(struct dc *dc)
+{
+ struct dc_state *state = kvzalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+
+ if (!state)
+ return NULL;
+
+ init_state(dc, state);
+ dc_state_construct(dc, state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (dc->debug.using_dml2)
+ dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2);
+#endif
+
+ kref_init(&state->refcount);
+
+ return state;
+}
+
+void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state)
+{
+ struct kref refcount = dst_state->refcount;
+#ifdef CONFIG_DRM_AMD_DC_FP
+ struct dml2_context *dst_dml2 = dst_state->bw_ctx.dml2;
+#endif
+
+ dc_state_copy_internal(dst_state, src_state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dst_state->bw_ctx.dml2 = dst_dml2;
+ if (src_state->bw_ctx.dml2)
+ dml2_copy(dst_state->bw_ctx.dml2, src_state->bw_ctx.dml2);
+#endif
+
+ /* context refcount should not be overridden */
+ dst_state->refcount = refcount;
+}
+
+struct dc_state *dc_state_create_copy(struct dc_state *src_state)
+{
+ struct dc_state *new_state;
+
+ new_state = kvmalloc(sizeof(struct dc_state),
+ GFP_KERNEL);
+ if (!new_state)
+ return NULL;
+
+ dc_state_copy_internal(new_state, src_state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (src_state->bw_ctx.dml2 &&
+ !dml2_create_copy(&new_state->bw_ctx.dml2, src_state->bw_ctx.dml2)) {
+ dc_state_release(new_state);
+ return NULL;
+ }
+#endif
+
+ kref_init(&new_state->refcount);
+
+ return new_state;
+}
+
+void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state)
+{
+ dc_state_copy(dst_state, dc->current_state);
+}
+
+struct dc_state *dc_state_create_current_copy(struct dc *dc)
+{
+ return dc_state_create_copy(dc->current_state);
+}
+
+void dc_state_construct(struct dc *dc, struct dc_state *state)
+{
+ state->clk_mgr = dc->clk_mgr;
+
+ /* Initialise DIG link encoder resource tracking variables. */
+ if (dc->res_pool)
+ link_enc_cfg_init(dc, state);
+}
+
+void dc_state_destruct(struct dc_state *state)
+{
+ int i, j;
+
+ for (i = 0; i < state->stream_count; i++) {
+ for (j = 0; j < state->stream_status[i].plane_count; j++)
+ dc_plane_state_release(
+ state->stream_status[i].plane_states[j]);
+
+ state->stream_status[i].plane_count = 0;
+ dc_stream_release(state->streams[i]);
+ state->streams[i] = NULL;
+ }
+ state->stream_count = 0;
+
+ /* release tracked phantoms */
+ for (i = 0; i < state->phantom_stream_count; i++) {
+ dc_stream_release(state->phantom_streams[i]);
+ state->phantom_streams[i] = NULL;
+ }
+ state->phantom_stream_count = 0;
+
+ for (i = 0; i < state->phantom_plane_count; i++) {
+ dc_plane_state_release(state->phantom_planes[i]);
+ state->phantom_planes[i] = NULL;
+ }
+ state->phantom_plane_count = 0;
+
+ state->stream_mask = 0;
+ memset(&state->res_ctx, 0, sizeof(state->res_ctx));
+ memset(&state->pp_display_cfg, 0, sizeof(state->pp_display_cfg));
+ memset(&state->dcn_bw_vars, 0, sizeof(state->dcn_bw_vars));
+ state->clk_mgr = NULL;
+ memset(&state->bw_ctx.bw, 0, sizeof(state->bw_ctx.bw));
+ memset(state->block_sequence, 0, sizeof(state->block_sequence));
+ state->block_sequence_steps = 0;
+ memset(state->dc_dmub_cmd, 0, sizeof(state->dc_dmub_cmd));
+ state->dmub_cmd_count = 0;
+ memset(&state->perf_params, 0, sizeof(state->perf_params));
+ memset(&state->scratch, 0, sizeof(state->scratch));
+}
+
+void dc_state_retain(struct dc_state *state)
+{
+ kref_get(&state->refcount);
+}
+
+static void dc_state_free(struct kref *kref)
+{
+ struct dc_state *state = container_of(kref, struct dc_state, refcount);
+
+ dc_state_destruct(state);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ dml2_destroy(state->bw_ctx.dml2);
+ state->bw_ctx.dml2 = 0;
+#endif
+
+ kvfree(state);
+}
+
+void dc_state_release(struct dc_state *state)
+{
+ if (state != NULL)
+ kref_put(&state->refcount, dc_state_free);
+}
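+
+/*
+ * Illustrative lifecycle sketch (hypothetical DM-side caller, not taken
+ * from this change): a state is created once, shared via reference
+ * counting, and freed on the final release.
+ *
+ *	struct dc_state *state = dc_state_create(dc);
+ *
+ *	if (state) {
+ *		dc_state_retain(state);		// a second user takes a reference
+ *		...				// both users operate on the state
+ *		dc_state_release(state);	// second user drops its reference
+ *		dc_state_release(state);	// final put -> dc_state_free()
+ *	}
+ */
+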
+/*
+ * dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
+ */
+enum dc_status dc_state_add_stream(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ enum dc_status res;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (state->stream_count >= dc->res_pool->timing_generator_count) {
+ DC_LOG_WARNING("Max streams reached, can't add stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ state->streams[state->stream_count] = stream;
+ dc_stream_retain(stream);
+ state->stream_count++;
+
+ res = resource_add_otg_master_for_stream_output(
+ state, dc->res_pool, stream);
+ if (res != DC_OK)
+ DC_LOG_WARNING("Adding stream %p to context failed with err %d!\n", stream, res);
+
+ return res;
+}
+
+/*
+ * dc_state_remove_stream() - Remove a stream from a dc_state.
+ */
+enum dc_status dc_state_remove_stream(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ int i;
+ struct pipe_ctx *del_pipe = resource_get_otg_master_for_stream(
+ &state->res_ctx, stream);
+
+ if (!del_pipe) {
+ dm_error("Pipe not found for stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ resource_update_pipes_for_stream_with_slice_count(state,
+ dc->current_state, dc->res_pool, stream, 1);
+ resource_remove_otg_master_for_stream_output(
+ state, dc->res_pool, stream);
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream)
+ break;
+
+ if (state->streams[i] != stream) {
+ dm_error("Context doesn't have stream %p !\n", stream);
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ dc_stream_release(state->streams[i]);
+ state->stream_count--;
+
+ /* Trim back arrays */
+ for (; i < state->stream_count; i++) {
+ state->streams[i] = state->streams[i + 1];
+ state->stream_status[i] = state->stream_status[i + 1];
+ }
+
+ state->streams[state->stream_count] = NULL;
+ memset(
+ &state->stream_status[state->stream_count],
+ 0,
+ sizeof(state->stream_status[0]));
+
+ return DC_OK;
+}
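+
+/*
+ * Usage sketch (hypothetical caller, assuming 'stream' and a writable
+ * 'state' already exist): a stream is added before validation and removed
+ * when its output is torn down.
+ *
+ *	if (dc_state_add_stream(dc, state, stream) != DC_OK)
+ *		goto fail;
+ *	...
+ *	dc_state_remove_stream(dc, state, stream);
+ */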
+
+bool dc_state_add_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state)
+{
+ struct resource_pool *pool = dc->res_pool;
+ struct pipe_ctx *otg_master_pipe;
+ struct dc_stream_status *stream_status = NULL;
+ bool added = false;
+
+ stream_status = dc_state_get_stream_status(state, stream);
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to attach surface!\n");
+ goto out;
+ } else if (stream_status->plane_count == MAX_SURFACE_NUM) {
+ dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
+ plane_state, MAX_SURFACE_NUM);
+ goto out;
+ }
+
+ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+ /* ODM combine could prevent us from supporting more planes;
+ * we will reset the ODM slice count back to 1 when all planes have
+ * been removed to maximize the number of planes supported when
+ * new planes are added.
+ */
+ resource_update_pipes_for_stream_with_slice_count(
+ state, dc->current_state, dc->res_pool, stream, 1);
+
+ otg_master_pipe = resource_get_otg_master_for_stream(
+ &state->res_ctx, stream);
+ if (otg_master_pipe)
+ added = resource_append_dpp_pipes_for_plane_composition(state,
+ dc->current_state, pool, otg_master_pipe, plane_state);
+
+ if (added) {
+ stream_status->plane_states[stream_status->plane_count] =
+ plane_state;
+ stream_status->plane_count++;
+ dc_plane_state_retain(plane_state);
+ }
+
+out:
+ return added;
+}
+
+bool dc_state_remove_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state)
+{
+ int i;
+ struct dc_stream_status *stream_status = NULL;
+ struct resource_pool *pool = dc->res_pool;
+
+ if (!plane_state)
+ return true;
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream) {
+ stream_status = &state->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream not found; failed to remove plane.\n");
+ return false;
+ }
+
+ resource_remove_dpp_pipes_for_plane_composition(
+ state, pool, plane_state);
+
+ for (i = 0; i < stream_status->plane_count; i++) {
+ if (stream_status->plane_states[i] == plane_state) {
+ dc_plane_state_release(stream_status->plane_states[i]);
+ break;
+ }
+ }
+
+ if (i == stream_status->plane_count) {
+ dm_error("Existing plane_state not found; failed to detach it!\n");
+ return false;
+ }
+
+ stream_status->plane_count--;
+
+ /* Start at the plane we've just released, and move all the planes one index forward to "trim" the array */
+ for (; i < stream_status->plane_count; i++)
+ stream_status->plane_states[i] = stream_status->plane_states[i + 1];
+
+ stream_status->plane_states[stream_status->plane_count] = NULL;
+
+ if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+ /* ODM combine could prevent us from supporting more planes;
+ * we will reset the ODM slice count back to 1 when all planes have
+ * been removed to maximize the number of planes supported when
+ * new planes are added.
+ */
+ resource_update_pipes_for_stream_with_slice_count(
+ state, dc->current_state, dc->res_pool, stream, 1);
+
+ return true;
+}
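+
+/*
+ * Usage sketch (hypothetical caller): a plane is attached to a stream that
+ * already exists in 'state', and detached again before the stream itself
+ * is removed.
+ *
+ *	if (!dc_state_add_plane(dc, stream, plane_state, state))
+ *		goto fail;
+ *	...
+ *	dc_state_remove_plane(dc, stream, plane_state, state);
+ */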
+
+/**
+ * dc_state_rem_all_planes_for_stream - Remove planes attached to the target stream.
+ *
+ * @dc: Pointer to the dc object.
+ * @stream: Target stream from which we want to remove the attached planes.
+ * @state: context from which the planes are to be removed.
+ *
+ * Return:
+ * Return true if DC was able to remove all planes from the target
+ * stream, otherwise, return false.
+ */
+bool dc_state_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *state)
+{
+ int i, old_plane_count;
+ struct dc_stream_status *stream_status = NULL;
+ struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == stream) {
+ stream_status = &state->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream %p not found!\n", stream);
+ return false;
+ }
+
+ old_plane_count = stream_status->plane_count;
+
+ for (i = 0; i < old_plane_count; i++)
+ del_planes[i] = stream_status->plane_states[i];
+
+ for (i = 0; i < old_plane_count; i++)
+ if (!dc_state_remove_plane(dc, stream, del_planes[i], state))
+ return false;
+
+ return true;
+}
+
+bool dc_state_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *state)
+{
+ int i;
+ bool result = true;
+
+ for (i = 0; i < plane_count; i++)
+ if (!dc_state_add_plane(dc, stream, plane_states[i], state)) {
+ result = false;
+ break;
+ }
+
+ return result;
+}
+
+/* Private dc_state functions */
+
+/**
+ * dc_state_get_stream_status - Get stream status from given dc state
+ * @state: DC state to find the stream status in
+ * @stream: The stream to get the stream status for
+ *
+ * The given stream is expected to exist in the given dc state. Otherwise, NULL
+ * will be returned.
+ */
+struct dc_stream_status *dc_state_get_stream_status(
+ struct dc_state *state,
+ struct dc_stream_state *stream)
+{
+ uint8_t i;
+
+ if (state == NULL)
+ return NULL;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (stream == state->streams[i])
+ return &state->stream_status[i];
+ }
+
+ return NULL;
+}
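+
+/*
+ * Example query (illustrative): look up the status a state holds for a
+ * stream, e.g. to inspect its plane count or SubVP classification.
+ *
+ *	struct dc_stream_status *status =
+ *		dc_state_get_stream_status(state, stream);
+ *
+ *	if (status && status->plane_count == 0)
+ *		... // stream currently has no planes attached
+ */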
+
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx)
+{
+ return dc_state_get_stream_subvp_type(state, pipe_ctx->stream);
+}
+
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ int i;
+
+ enum mall_stream_type type = SUBVP_NONE;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i] == stream) {
+ type = state->stream_status[i].mall_stream_config.type;
+ break;
+ }
+ }
+
+ return type;
+}
+
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+ const struct dc_stream_state *stream)
+{
+ int i;
+
+ struct dc_stream_state *paired_stream = NULL;
+
+ for (i = 0; i < state->stream_count; i++) {
+ if (state->streams[i] == stream) {
+ paired_stream = state->stream_status[i].mall_stream_config.paired_stream;
+ break;
+ }
+ }
+
+ return paired_stream;
+}
+
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *main_stream)
+{
+ struct dc_stream_state *phantom_stream;
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ phantom_stream = dc_create_stream_for_sink(main_stream->sink);
+
+ if (!phantom_stream) {
+ DC_LOG_ERROR("Failed to allocate phantom stream.\n");
+ return NULL;
+ }
+
+ /* track phantom stream in dc_state */
+ dc_state_track_phantom_stream(state, phantom_stream);
+
+ phantom_stream->is_phantom = true;
+ phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
+ phantom_stream->dpms_off = true;
+
+ return phantom_stream;
+}
+
+void dc_state_release_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream)
+{
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!dc_state_untrack_phantom_stream(state, phantom_stream)) {
+ DC_LOG_ERROR("Failed to free phantom stream %p in dc state %p.\n", phantom_stream, state);
+ return;
+ }
+
+ dc_stream_release(phantom_stream);
+}
+
+struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *main_plane)
+{
+ struct dc_plane_state *phantom_plane = dc_create_plane_state(dc);
+
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!phantom_plane) {
+ DC_LOG_ERROR("Failed to allocate phantom plane.\n");
+ return NULL;
+ }
+
+ /* track phantom inside dc_state */
+ dc_state_track_phantom_plane(state, phantom_plane);
+
+ phantom_plane->is_phantom = true;
+
+ return phantom_plane;
+}
+
+void dc_state_release_phantom_plane(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *phantom_plane)
+{
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ if (!dc_state_untrack_phantom_plane(state, phantom_plane)) {
+ DC_LOG_ERROR("Failed to free phantom plane %p in dc state %p.\n", phantom_plane, state);
+ return;
+ }
+
+ dc_plane_state_release(phantom_plane);
+}
+
+/* add phantom streams to context and generate correct meta inside dc_state */
+enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream,
+ struct dc_stream_state *main_stream)
+{
+ struct dc_stream_status *main_stream_status;
+ struct dc_stream_status *phantom_stream_status;
+ enum dc_status res = dc_state_add_stream(dc, state, phantom_stream);
+
+ /* check if stream is tracked */
+ if (res == DC_OK && !dc_state_is_phantom_stream_tracked(state, phantom_stream)) {
+ /* stream must be tracked if added to state */
+ dc_state_track_phantom_stream(state, phantom_stream);
+ }
+
+ /* setup subvp meta */
+ main_stream_status = dc_state_get_stream_status(state, main_stream);
+ phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
+ phantom_stream_status->mall_stream_config.type = SUBVP_PHANTOM;
+ phantom_stream_status->mall_stream_config.paired_stream = main_stream;
+ main_stream_status->mall_stream_config.type = SUBVP_MAIN;
+ main_stream_status->mall_stream_config.paired_stream = phantom_stream;
+
+ return res;
+}
+
+enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream)
+{
+ struct dc_stream_status *main_stream_status;
+ struct dc_stream_status *phantom_stream_status;
+
+ /* reset subvp meta */
+ phantom_stream_status = dc_state_get_stream_status(state, phantom_stream);
+ main_stream_status = dc_state_get_stream_status(state, phantom_stream_status->mall_stream_config.paired_stream);
+ phantom_stream_status->mall_stream_config.type = SUBVP_NONE;
+ phantom_stream_status->mall_stream_config.paired_stream = NULL;
+ if (main_stream_status) {
+ main_stream_status->mall_stream_config.type = SUBVP_NONE;
+ main_stream_status->mall_stream_config.paired_stream = NULL;
+ }
+
+ /* remove stream from state */
+ return dc_state_remove_stream(dc, state, phantom_stream);
+}
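+
+/*
+ * SubVP phantom flow sketch (illustrative, assuming 'main_stream' is the
+ * SUBVP_MAIN stream already in 'state'): create a phantom, add it so the
+ * SubVP pairing metadata is set up, and tear it down in reverse order.
+ *
+ *	struct dc_stream_state *phantom =
+ *		dc_state_create_phantom_stream(dc, state, main_stream);
+ *
+ *	if (phantom)
+ *		dc_state_add_phantom_stream(dc, state, phantom, main_stream);
+ *	...
+ *	dc_state_remove_phantom_stream(dc, state, phantom);
+ *	dc_state_release_phantom_stream(dc, state, phantom);
+ */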
+
+bool dc_state_add_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state)
+{
+ bool res = dc_state_add_plane(dc, phantom_stream, phantom_plane, state);
+
+ /* check if plane is tracked */
+ if (res && !dc_state_is_phantom_plane_tracked(state, phantom_plane)) {
+ /* plane must be tracked if added to state */
+ dc_state_track_phantom_plane(state, phantom_plane);
+ }
+
+ return res;
+}
+
+bool dc_state_remove_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state)
+{
+ return dc_state_remove_plane(dc, phantom_stream, phantom_plane, state);
+}
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_state *state,
+ bool should_release_planes)
+{
+ int i, old_plane_count;
+ struct dc_stream_status *stream_status = NULL;
+ struct dc_plane_state *del_planes[MAX_SURFACE_NUM] = { 0 };
+
+ for (i = 0; i < state->stream_count; i++)
+ if (state->streams[i] == phantom_stream) {
+ stream_status = &state->stream_status[i];
+ break;
+ }
+
+ if (stream_status == NULL) {
+ dm_error("Existing stream %p not found!\n", phantom_stream);
+ return false;
+ }
+
+ old_plane_count = stream_status->plane_count;
+
+ for (i = 0; i < old_plane_count; i++)
+ del_planes[i] = stream_status->plane_states[i];
+
+ for (i = 0; i < old_plane_count; i++) {
+ if (!dc_state_remove_plane(dc, phantom_stream, del_planes[i], state))
+ return false;
+ if (should_release_planes)
+ dc_state_release_phantom_plane(dc, state, del_planes[i]);
+ }
+
+ return true;
+}
+
+bool dc_state_add_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state * const *phantom_planes,
+ int plane_count,
+ struct dc_state *state)
+{
+ return dc_state_add_all_planes_for_stream(dc, phantom_stream, phantom_planes, plane_count, state);
+}
+
+bool dc_state_remove_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state)
+{
+ int i;
+ bool removed_phantom = false;
+ struct dc_stream_state *phantom_stream = NULL;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
+
+ if (pipe->plane_state && pipe->stream && dc_state_get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
+ phantom_stream = pipe->stream;
+
+ dc_state_rem_all_phantom_planes_for_stream(dc, phantom_stream, state, false);
+ dc_state_remove_phantom_stream(dc, state, phantom_stream);
+ removed_phantom = true;
+ }
+ }
+ return removed_phantom;
+}
+
+void dc_state_release_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state)
+{
+ int i;
+
+ for (i = 0; i < state->phantom_stream_count; i++)
+ dc_state_release_phantom_stream(dc, state, state->phantom_streams[i]);
+
+ for (i = 0; i < state->phantom_plane_count; i++)
+ dc_state_release_phantom_plane(dc, state, state->phantom_planes[i]);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 4bdf105d1..51a970fcb 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -31,6 +31,8 @@
#include "ipp.h"
#include "timing_generator.h"
#include "dc_dmub_srv.h"
+#include "dc_state_priv.h"
+#include "dc_stream_priv.h"
#define DC_LOGGER dc->ctx->logger
@@ -54,7 +56,7 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink)
}
}
-static bool dc_stream_construct(struct dc_stream_state *stream,
+bool dc_stream_construct(struct dc_stream_state *stream,
struct dc_sink *dc_sink_data)
{
uint32_t i = 0;
@@ -121,13 +123,12 @@ static bool dc_stream_construct(struct dc_stream_state *stream,
}
stream->out_transfer_func->type = TF_TYPE_BYPASS;
- stream->stream_id = stream->ctx->dc_stream_id_count;
- stream->ctx->dc_stream_id_count++;
+ dc_stream_assign_stream_id(stream);
return true;
}
-static void dc_stream_destruct(struct dc_stream_state *stream)
+void dc_stream_destruct(struct dc_stream_state *stream)
{
dc_sink_release(stream->sink);
if (stream->out_transfer_func != NULL) {
@@ -136,6 +137,13 @@ static void dc_stream_destruct(struct dc_stream_state *stream)
}
}
+void dc_stream_assign_stream_id(struct dc_stream_state *stream)
+{
+ /* MSB is reserved to indicate phantoms */
+ stream->stream_id = stream->ctx->dc_stream_id_count;
+ stream->ctx->dc_stream_id_count++;
+}
+
void dc_stream_retain(struct dc_stream_state *stream)
{
kref_get(&stream->refcount);
@@ -196,8 +204,7 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
if (new_stream->out_transfer_func)
dc_transfer_func_retain(new_stream->out_transfer_func);
- new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
- new_stream->ctx->dc_stream_id_count++;
+ dc_stream_assign_stream_id(new_stream);
/* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
@@ -209,31 +216,6 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
}
/**
- * dc_stream_get_status_from_state - Get stream status from given dc state
- * @state: DC state to find the stream status in
- * @stream: The stream to get the stream status for
- *
- * The given stream is expected to exist in the given dc state. Otherwise, NULL
- * will be returned.
- */
-struct dc_stream_status *dc_stream_get_status_from_state(
- struct dc_state *state,
- struct dc_stream_state *stream)
-{
- uint8_t i;
-
- if (state == NULL)
- return NULL;
-
- for (i = 0; i < state->stream_count; i++) {
- if (stream == state->streams[i])
- return &state->stream_status[i];
- }
-
- return NULL;
-}
-
-/**
* dc_stream_get_status() - Get current stream status of the given stream state
* @stream: The stream to get the stream status for.
*
@@ -244,7 +226,7 @@ struct dc_stream_status *dc_stream_get_status(
struct dc_stream_state *stream)
{
struct dc *dc = stream->ctx->dc;
- return dc_stream_get_status_from_state(dc->current_state, stream);
+ return dc_state_get_stream_status(dc->current_state, stream);
}
static void program_cursor_attributes(
@@ -441,6 +423,8 @@ bool dc_stream_add_writeback(struct dc *dc,
return false;
}
+ dc_exit_ips_for_hw_access(dc);
+
wb_info->dwb_params.out_transfer_func = stream->out_transfer_func;
dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
@@ -465,16 +449,37 @@ bool dc_stream_add_writeback(struct dc *dc,
if (dc->hwss.enable_writeback) {
struct dc_stream_status *stream_status = dc_stream_get_status(stream);
struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
- dwb->otg_inst = stream_status->primary_otg_inst;
+ if (stream_status)
+ dwb->otg_inst = stream_status->primary_otg_inst;
+ }
+
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+
+ /* enable writeback */
+ if (dc->hwss.enable_writeback) {
+ struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb)) {
+ /* writeback pipe already enabled, only need to update */
+ dc->hwss.update_writeback(dc, wb_info, dc->current_state);
+ } else {
+ /* Enable writeback pipe from scratch */
+ dc->hwss.enable_writeback(dc, wb_info, dc->current_state);
+ }
}
+
return true;
}
-bool dc_stream_remove_writeback(struct dc *dc,
+bool dc_stream_fc_disable_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst)
{
- int i = 0, j = 0;
+ struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];
+
if (stream == NULL) {
dm_error("DC: dc_stream is NULL!\n");
return false;
@@ -490,27 +495,67 @@ bool dc_stream_remove_writeback(struct dc *dc,
return false;
}
-// stream->writeback_info[dwb_pipe_inst].wb_enabled = false;
- for (i = 0; i < stream->num_wb_info; i++) {
- /*dynamic update*/
- if (stream->writeback_info[i].wb_enabled &&
- stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst) {
- stream->writeback_info[i].wb_enabled = false;
- }
+ dc_exit_ips_for_hw_access(dc);
+
+ if (dwb->funcs->set_fc_enable)
+ dwb->funcs->set_fc_enable(dwb, DWB_FRAME_CAPTURE_DISABLE);
+
+ return true;
+}
+
+bool dc_stream_remove_writeback(struct dc *dc,
+ struct dc_stream_state *stream,
+ uint32_t dwb_pipe_inst)
+{
+ int i = 0, j = 0;
+ if (stream == NULL) {
+ dm_error("DC: dc_stream is NULL!\n");
+ return false;
+ }
+
+ if (dwb_pipe_inst >= MAX_DWB_PIPES) {
+ dm_error("DC: writeback pipe is invalid!\n");
+ return false;
+ }
+
+ if (stream->num_wb_info > MAX_DWB_PIPES) {
+ dm_error("DC: num_wb_info is invalid!\n");
+ return false;
}
/* remove writeback info for disabled writeback pipes from stream */
for (i = 0, j = 0; i < stream->num_wb_info; i++) {
if (stream->writeback_info[i].wb_enabled) {
- if (j < i)
- /* trim the array */
+
+ if (stream->writeback_info[i].dwb_pipe_inst == dwb_pipe_inst)
+ stream->writeback_info[i].wb_enabled = false;
+
+ /* trim the array */
+ if (j < i) {
memcpy(&stream->writeback_info[j], &stream->writeback_info[i],
sizeof(struct dc_writeback_info));
- j++;
+ j++;
+ }
}
}
stream->num_wb_info = j;
+ /* recalculate and apply DML parameters */
+ if (!dc->hwss.update_bandwidth(dc, dc->current_state)) {
+ dm_error("DC: update_bandwidth failed!\n");
+ return false;
+ }
+
+ dc_exit_ips_for_hw_access(dc);
+
+ /* disable writeback */
+ if (dc->hwss.disable_writeback) {
+ struct dwbc *dwb = dc->res_pool->dwbc[dwb_pipe_inst];
+
+ if (dwb->funcs->is_enabled(dwb))
+ dc->hwss.disable_writeback(dc, dwb_pipe_inst);
+ }
+
return true;
}
@@ -518,6 +563,8 @@ bool dc_stream_warmup_writeback(struct dc *dc,
int num_dwb,
struct dc_writeback_info *wb_info)
{
+ dc_exit_ips_for_hw_access(dc);
+
if (dc->hwss.mmhubbub_warmup)
return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info);
else
@@ -530,6 +577,8 @@ uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream)
struct resource_context *res_ctx =
&dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -558,6 +607,8 @@ bool dc_stream_send_dp_sdp(const struct dc_stream_state *stream,
dc = stream->ctx->dc;
res_ctx = &dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct pipe_ctx *pipe_ctx = &res_ctx->pipe_ctx[i];
@@ -589,6 +640,8 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
struct resource_context *res_ctx =
&dc->current_state->res_ctx;
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < MAX_PIPES; i++) {
struct timing_generator *tg = res_ctx->pipe_ctx[i].stream_res.tg;
@@ -625,6 +678,8 @@ bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream)
if (i == MAX_PIPES)
return true;
+ dc_exit_ips_for_hw_access(dc);
+
return dc->hwss.dmdata_status_done(pipe);
}
@@ -659,6 +714,8 @@ bool dc_stream_set_dynamic_metadata(struct dc *dc,
pipe_ctx->stream->dmdata_address = attr->address;
+ dc_exit_ips_for_hw_access(dc);
+
dc->hwss.program_dmdata_engine(pipe_ctx);
if (hubp->funcs->dmdata_set_attributes != NULL &&
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index a80e45300..19140fb65 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -32,10 +32,12 @@
#include "transform.h"
#include "dpp.h"
+#include "dc_plane_priv.h"
+
/*******************************************************************************
* Private functions
******************************************************************************/
-static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
+void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state)
{
plane_state->ctx = ctx;
@@ -63,7 +65,7 @@ static void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *pl
}
-static void dc_plane_destruct(struct dc_plane_state *plane_state)
+void dc_plane_destruct(struct dc_plane_state *plane_state)
{
if (plane_state->gamma_correction != NULL) {
dc_gamma_release(&plane_state->gamma_correction);
@@ -159,6 +161,8 @@ const struct dc_plane_status *dc_plane_get_status(
break;
}
+ dc_exit_ips_for_hw_access(dc);
+
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8164a5340..fc60fa581 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -27,6 +27,8 @@
#define DC_INTERFACE_H_
#include "dc_types.h"
+#include "dc_state.h"
+#include "dc_plane.h"
#include "grph_object_defs.h"
#include "logger_types.h"
#include "hdcp_msg_types.h"
@@ -49,7 +51,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.259"
+#define DC_VER "3.2.266"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -432,6 +434,7 @@ struct dc_config {
bool EnableMinDispClkODM;
bool enable_auto_dpm_test_logs;
unsigned int disable_ips;
+ unsigned int disable_ips_in_vpb;
};
enum visual_confirm {
@@ -461,6 +464,12 @@ enum dml_hostvm_override_opts {
DML_HOSTVM_OVERRIDE_TRUE = 0x2,
};
+enum dc_replay_power_opts {
+ replay_power_opt_invalid = 0x0,
+ replay_power_opt_smu_opt_static_screen = 0x1,
+ replay_power_opt_z10_static_screen = 0x10,
+};
+
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@@ -956,7 +965,6 @@ struct dc_debug_options {
unsigned int min_prefetch_in_strobe_ns;
bool disable_unbounded_requesting;
bool dig_fifo_off_in_blank;
- bool temp_mst_deallocation_sequence;
bool override_dispclk_programming;
bool otg_crc_db;
bool disallow_dispclk_dppclk_ds;
@@ -979,6 +987,10 @@ struct dc_debug_options {
bool psp_disabled_wa;
unsigned int ips2_eval_delay_us;
unsigned int ips2_entry_delay_us;
+ bool disable_dmub_reallow_idle;
+ bool disable_timeout;
+ bool disable_extblankadj;
+ unsigned int static_screen_wait_frames;
};
struct gpu_info_soc_bounding_box_v1_0;
@@ -1026,7 +1038,6 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
- bool wm_optimized_required;
bool idle_optimizations_allowed;
bool enable_c20_dtm_b0;
@@ -1389,13 +1400,6 @@ struct dc_surface_update {
/*
* Create a new surface with default parameters;
*/
-struct dc_plane_state *dc_create_plane_state(struct dc *dc);
-const struct dc_plane_status *dc_plane_get_status(
- const struct dc_plane_state *plane_state);
-
-void dc_plane_state_retain(struct dc_plane_state *plane_state);
-void dc_plane_state_release(struct dc_plane_state *plane_state);
-
void dc_gamma_retain(struct dc_gamma *dc_gamma);
void dc_gamma_release(struct dc_gamma **dc_gamma);
struct dc_gamma *dc_create_gamma(void);
@@ -1459,37 +1463,20 @@ enum dc_status dc_validate_global_state(
struct dc_state *new_ctx,
bool fast_validate);
-
-void dc_resource_state_construct(
- const struct dc *dc,
- struct dc_state *dst_ctx);
-
bool dc_acquire_release_mpc_3dlut(
struct dc *dc, bool acquire,
struct dc_stream_state *stream,
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
-void dc_resource_state_copy_construct(
- const struct dc_state *src_ctx,
- struct dc_state *dst_ctx);
-
-void dc_resource_state_copy_construct_current(
- const struct dc *dc,
- struct dc_state *dst_ctx);
-
-void dc_resource_state_destruct(struct dc_state *context);
-
bool dc_resource_is_dsc_encoding_supported(const struct dc *dc);
+void get_audio_check(struct audio_info *aud_modes,
+ struct audio_check *aud_chk);
enum dc_status dc_commit_streams(struct dc *dc,
struct dc_stream_state *streams[],
uint8_t stream_count);
-struct dc_state *dc_create_state(struct dc *dc);
-struct dc_state *dc_copy_state(struct dc_state *src_ctx);
-void dc_retain_state(struct dc_state *context);
-void dc_release_state(struct dc_state *context);
struct dc_plane_state *dc_get_surface_for_mpcc(struct dc *dc,
struct dc_stream_state *stream,
@@ -1541,7 +1528,13 @@ struct dc_link {
bool is_dig_mapping_flexible;
bool hpd_status; /* HPD status of link without physical HPD pin. */
bool is_hpd_pending; /* Indicates a new received hpd */
- bool is_automated; /* Indicates automated testing */
+
+ /* USB4 DPIA links skip verifying link cap, instead performing the fallback method
+ * for every link training. This is incompatible with DP LL compliance automation,
+ * which expects the same link settings to be used every retry on a link loss.
+ * This flag is used to skip the fallback when link loss occurs during automation.
+ */
+ bool skip_fallback_on_link_loss;
bool edp_sink_present;
@@ -2092,6 +2085,20 @@ bool dc_link_setup_psr(struct dc_link *dc_link,
const struct dc_stream_state *stream, struct psr_config *psr_config,
struct psr_context *psr_context);
+/*
+ * Communicate with DMUB to allow or disallow Panel Replay on the specified link:
+ *
+ * @link: pointer to the dc_link struct instance
+ * @enable: enable(active) or disable(inactive) replay
+ * @wait: whether to wait for the transition to the active state to complete.
+ * @force_static: force disable(inactive) the replay
+ * @power_opts: power optimization parameters to pass to DMUB.
+ *
+ * return: true if Replay active is allowed, false otherwise.
+ */
+bool dc_link_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
+
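+/*
+ * Illustrative call (hypothetical DM-side caller, 'link' is assumed to be a
+ * valid struct dc_link pointer): request Replay active with the SMU
+ * static-screen optimization, without forcing a static state.
+ *
+ *	bool active = true;
+ *	unsigned int opts = replay_power_opt_smu_opt_static_screen;
+ *
+ *	dc_link_set_replay_allow_active(link, &active, false, false, &opts);
+ */
+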
bool dc_link_get_replay_state(const struct dc_link *dc_link, uint64_t *state);
/* On eDP links this function call will stall until T12 has elapsed.
@@ -2318,6 +2325,7 @@ bool dc_is_plane_eligible_for_idle_optimizations(struct dc *dc, struct dc_plane_
struct dc_cursor_attributes *cursor_attr);
void dc_allow_idle_optimizations(struct dc *dc, bool allow);
+void dc_exit_ips_for_hw_access(struct dc *dc);
bool dc_dmub_is_ips_idle_state(struct dc *dc);
/* set min and max memory clock to lowest and highest DPM level, respectively */
@@ -2336,6 +2344,9 @@ void dc_hardware_release(struct dc *dc);
void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
+
+bool dc_set_replay_allow_active(struct dc *dc, bool active);
+
void dc_z10_restore(const struct dc *dc);
void dc_z10_save_init(struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index 05b3433cb..9084b3208 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -33,6 +33,7 @@
#include "cursor_reg_cache.h"
#include "resource.h"
#include "clk_mgr.h"
+#include "dc_state_priv.h"
#define CTX dc_dmub_srv->ctx
#define DC_LOGGER CTX->logger
@@ -141,7 +142,10 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_cmd_execute(dmub);
+ if (status == DMUB_STATUS_POWER_STATE_D3)
+ return false;
+
dmub_srv_wait_for_idle(dmub, 100000);
/* Requeue the command. */
@@ -149,16 +153,20 @@ bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv,
}
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
}
status = dmub_srv_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
@@ -219,7 +227,10 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
if (status == DMUB_STATUS_QUEUE_FULL) {
/* Execute and wait for queue to become empty again. */
- dmub_srv_cmd_execute(dmub);
+ status = dmub_srv_cmd_execute(dmub);
+ if (status == DMUB_STATUS_POWER_STATE_D3)
+ return false;
+
dmub_srv_wait_for_idle(dmub, 100000);
/* Requeue the command. */
@@ -227,22 +238,31 @@ bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int coun
}
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error queueing DMUB command: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error queueing DMUB command: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
}
status = dmub_srv_cmd_execute(dmub);
if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error starting DMUB execution: status=%d\n", status);
- dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ if (status != DMUB_STATUS_POWER_STATE_D3) {
+ DC_ERROR("Error starting DMUB execution: status=%d\n", status);
+ dc_dmub_srv_log_diagnostic_data(dc_dmub_srv);
+ }
return false;
}
// Wait for DMUB to process command
if (wait_type != DM_DMUB_WAIT_TYPE_NO_WAIT) {
- status = dmub_srv_wait_for_idle(dmub, 100000);
+ if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
+ do {
+ status = dmub_srv_wait_for_idle(dmub, 100000);
+ } while (status != DMUB_STATUS_OK);
+ } else
+ status = dmub_srv_wait_for_idle(dmub, 100000);
if (status != DMUB_STATUS_OK) {
DC_LOG_DEBUG("No reply for DMUB command: status=%d\n", status);
@@ -500,10 +520,11 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
/**
* populate_subvp_cmd_drr_info - Helper to populate DRR pipe info for the DMCUB subvp command
*
- * @dc: [in] current dc state
+ * @dc: [in] pointer to dc object
* @subvp_pipe: [in] pipe_ctx for the SubVP pipe
* @vblank_pipe: [in] pipe_ctx for the DRR pipe
* @pipe_data: [in] Pipe data which stores the VBLANK/DRR info
+ * @context: [in] DC state for access to phantom stream
*
* Populate the DMCUB SubVP command with DRR pipe info. All the information
* required for calculating the SubVP + DRR microschedule is populated here.
@@ -514,12 +535,14 @@ void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pi
* 3. Populate the drr_info with the min and max supported vtotal values
*/
static void populate_subvp_cmd_drr_info(struct dc *dc,
+ struct dc_state *context,
struct pipe_ctx *subvp_pipe,
struct pipe_ctx *vblank_pipe,
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data)
{
+ struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
- struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
struct dc_crtc_timing *drr_timing = &vblank_pipe->stream->timing;
uint16_t drr_frame_us = 0;
uint16_t min_drr_supported_us = 0;
@@ -607,7 +630,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
continue;
// Find the SubVP pipe
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
break;
}
@@ -624,7 +647,7 @@ static void populate_subvp_cmd_vblank_pipe_info(struct dc *dc,
if (vblank_pipe->stream->ignore_msa_timing_param &&
(vblank_pipe->stream->allow_freesync || vblank_pipe->stream->vrr_active_variable || vblank_pipe->stream->vrr_active_fixed))
- populate_subvp_cmd_drr_info(dc, pipe, vblank_pipe, pipe_data);
+ populate_subvp_cmd_drr_info(dc, context, pipe, vblank_pipe, pipe_data);
}
/**
@@ -649,10 +672,17 @@ static void update_subvp_prefetch_end_to_mall_start(struct dc *dc,
uint32_t subvp0_prefetch_us = 0;
uint32_t subvp1_prefetch_us = 0;
uint32_t prefetch_delta_us = 0;
- struct dc_crtc_timing *phantom_timing0 = &subvp_pipes[0]->stream->mall_stream_config.paired_stream->timing;
- struct dc_crtc_timing *phantom_timing1 = &subvp_pipes[1]->stream->mall_stream_config.paired_stream->timing;
+ struct dc_stream_state *phantom_stream0 = NULL;
+ struct dc_stream_state *phantom_stream1 = NULL;
+ struct dc_crtc_timing *phantom_timing0 = NULL;
+ struct dc_crtc_timing *phantom_timing1 = NULL;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data = NULL;
+ phantom_stream0 = dc_state_get_paired_subvp_stream(context, subvp_pipes[0]->stream);
+ phantom_stream1 = dc_state_get_paired_subvp_stream(context, subvp_pipes[1]->stream);
+ phantom_timing0 = &phantom_stream0->timing;
+ phantom_timing1 = &phantom_stream1->timing;
+
subvp0_prefetch_us = div64_u64(((uint64_t)(phantom_timing0->v_total - phantom_timing0->v_front_porch) *
(uint64_t)phantom_timing0->h_total * 1000000),
(((uint64_t)phantom_timing0->pix_clk_100hz * 100) + dc->caps.subvp_prefetch_end_to_mall_start_us));
@@ -702,8 +732,9 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
uint32_t j;
struct dmub_cmd_fw_assisted_mclk_switch_pipe_data_v2 *pipe_data =
&cmd->fw_assisted_mclk_switch_v2.config_data.pipe_data[cmd_pipe_index];
+ struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
struct dc_crtc_timing *main_timing = &subvp_pipe->stream->timing;
- struct dc_crtc_timing *phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ struct dc_crtc_timing *phantom_timing = &phantom_stream->timing;
uint32_t out_num_stream, out_den_stream, out_num_plane, out_den_plane, out_num, out_den;
pipe_data->mode = SUBVP;
@@ -757,7 +788,7 @@ static void populate_subvp_cmd_pipe_info(struct dc *dc,
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct pipe_ctx *phantom_pipe = &context->res_ctx.pipe_ctx[j];
- if (phantom_pipe->stream == subvp_pipe->stream->mall_stream_config.paired_stream) {
+ if (phantom_pipe->stream == dc_state_get_paired_subvp_stream(context, subvp_pipe->stream)) {
pipe_data->pipe_config.subvp_data.phantom_pipe_index = phantom_pipe->stream_res.tg->inst;
if (phantom_pipe->bottom_pipe) {
pipe_data->pipe_config.subvp_data.phantom_split_pipe_index = phantom_pipe->bottom_pipe->plane_res.hubp->inst;
@@ -791,6 +822,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
union dmub_rb_cmd cmd;
struct pipe_ctx *subvp_pipes[2];
uint32_t wm_val_refclk = 0;
+ enum mall_stream_type pipe_mall_type;
memset(&cmd, 0, sizeof(cmd));
// FW command for SUBVP
@@ -806,7 +838,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
*/
if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
subvp_pipes[subvp_count++] = pipe;
}
@@ -814,6 +846,7 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
// For each pipe that is a "main" SUBVP pipe, fill in pipe data for DMUB SUBVP cmd
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (!pipe->stream)
continue;
@@ -824,12 +857,11 @@ void dc_dmub_setup_subvp_dmub_command(struct dc *dc,
*/
if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.paired_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ pipe_mall_type == SUBVP_MAIN) {
populate_subvp_cmd_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
} else if (resource_is_pipe_type(pipe, OTG_MASTER) &&
resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ pipe_mall_type == SUBVP_NONE) {
// Don't need to check for ActiveDRAMClockChangeMargin < 0, not valid in cases where
// we run through DML without calculating "natural" P-state support
populate_subvp_cmd_vblank_pipe_info(dc, context, &cmd, pipe, cmd_pipe_index++);
@@ -1142,10 +1174,16 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
dc_ctx = dc_dmub_srv->ctx;
if (wait) {
- status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
- if (status != DMUB_STATUS_OK) {
- DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
- return false;
+ if (dc_dmub_srv->ctx->dc->debug.disable_timeout) {
+ do {
+ status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
+ } while (status != DMUB_STATUS_OK);
+ } else {
+ status = dmub_srv_wait_for_hw_pwr_up(dc_dmub_srv->dmub, 500000);
+ if (status != DMUB_STATUS_OK) {
+ DC_ERROR("Error querying DMUB hw power up status: error=%d\n", status);
+ return false;
+ }
}
} else
return dmub_srv_is_hw_pwr_up(dc_dmub_srv->dmub);
@@ -1175,22 +1213,18 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
}
/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
- dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ /* We also do not perform a wait since DMCUB could enter idle after the notification. */
+ dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_NO_WAIT);
}
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
- const uint32_t max_num_polls = 10000;
uint32_t allow_state = 0;
uint32_t commit_state = 0;
- uint32_t i;
if (dc->debug.dmcub_emulation)
return;
- if (!dc->idle_optimizations_allowed)
- return;
-
if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
return;
@@ -1203,8 +1237,16 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
// Wait for evaluation time
- udelay(dc->debug.ips2_eval_delay_us);
- commit_state = dc->hwss.get_idle_state(dc);
+ for (;;) {
+ udelay(dc->debug.ips2_eval_delay_us);
+ commit_state = dc->hwss.get_idle_state(dc);
+ if (commit_state & DMUB_IPS2_ALLOW_MASK)
+ break;
+
+ /* allow was still set, retry eval delay */
+ dc->hwss.set_idle_state(dc, false);
+ }
+
if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
@@ -1213,14 +1255,13 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
udelay(dc->debug.ips2_entry_delay_us);
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
- for (i = 0; i < max_num_polls; ++i) {
+ for (;;) {
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS2_COMMIT_MASK)
break;
udelay(1);
}
- ASSERT(i < max_num_polls);
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
@@ -1235,14 +1276,13 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
dc_dmub_srv_notify_idle(dc, false);
if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
- for (i = 0; i < max_num_polls; ++i) {
+ for (;;) {
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS1_COMMIT_MASK)
break;
udelay(1);
}
- ASSERT(i < max_num_polls);
}
}
@@ -1324,7 +1364,7 @@ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned in
else
result = dm_execute_dmub_cmd(ctx, cmd, wait_type);
- if (result && reallow_idle)
+ if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
@@ -1373,7 +1413,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com
result = dc_dmub_execute_gpint(ctx, command_code, param, response, wait_type);
- if (result && reallow_idle)
+ if (result && reallow_idle && !ctx->dc->debug.disable_dmub_reallow_idle)
dc_dmub_srv_apply_idle_power_optimizations(ctx->dc, true);
return result;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
new file mode 100644
index 000000000..ef380cae8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_PLANE_H_
+#define _DC_PLANE_H_
+
+#include "dc.h"
+#include "dc_hw_types.h"
+
+struct dc_plane_state *dc_create_plane_state(struct dc *dc);
+const struct dc_plane_status *dc_plane_get_status(
+ const struct dc_plane_state *plane_state);
+void dc_plane_state_retain(struct dc_plane_state *plane_state);
+void dc_plane_state_release(struct dc_plane_state *plane_state);
+
+#endif /* _DC_PLANE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
new file mode 100644
index 000000000..9ee184c1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane_priv.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_PLANE_PRIV_H_
+#define _DC_PLANE_PRIV_H_
+
+#include "dc_plane.h"
+
+void dc_plane_construct(struct dc_context *ctx, struct dc_plane_state *plane_state);
+void dc_plane_destruct(struct dc_plane_state *plane_state);
+
+#endif /* _DC_PLANE_PRIV_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state.h b/drivers/gpu/drm/amd/display/dc/dc_state.h
new file mode 100644
index 000000000..d167fdbfa
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_state.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_H_
+#define _DC_STATE_H_
+
+#include "dc.h"
+#include "inc/core_status.h"
+
+struct dc_state *dc_state_create(struct dc *dc);
+void dc_state_copy(struct dc_state *dst_state, struct dc_state *src_state);
+struct dc_state *dc_state_create_copy(struct dc_state *src_state);
+void dc_state_copy_current(struct dc *dc, struct dc_state *dst_state);
+struct dc_state *dc_state_create_current_copy(struct dc *dc);
+void dc_state_construct(struct dc *dc, struct dc_state *state);
+void dc_state_destruct(struct dc_state *state);
+void dc_state_retain(struct dc_state *state);
+void dc_state_release(struct dc_state *state);
+
+enum dc_status dc_state_add_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+
+enum dc_status dc_state_remove_stream(
+ struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+
+bool dc_state_add_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state);
+
+bool dc_state_remove_plane(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *state);
+
+bool dc_state_rem_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_state *state);
+
+bool dc_state_add_all_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state * const *plane_states,
+ int plane_count,
+ struct dc_state *state);
+
+struct dc_stream_status *dc_state_get_stream_status(
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+#endif /* _DC_STATE_H_ */
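
Illustrative sketch, not part of the patch: how the new dc_state_* entry points replace the removed dc_add_stream_to_ctx()/dc_add_plane_to_context() calls. dc, stream and plane are assumed to already exist; DC_OK comes from core_status.h.

	struct dc_state *state = dc_state_create_current_copy(dc);

	if (state) {
		if (dc_state_add_stream(dc, state, stream) == DC_OK &&
		    dc_state_add_plane(dc, stream, plane, state)) {
			/* ... validate and commit the new state as before ... */
		}
		dc_state_release(state);
	}
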
diff --git a/drivers/gpu/drm/amd/display/dc/dc_state_priv.h b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
new file mode 100644
index 000000000..c1f44e09a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_state_priv.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STATE_PRIV_H_
+#define _DC_STATE_PRIV_H_
+
+#include "dc_state.h"
+#include "dc_stream.h"
+
+/* Get the type of the provided resource (none, phantom, main) based on the provided
+ * context. If the context is unavailable, determine only if phantom or not.
+ */
+enum mall_stream_type dc_state_get_pipe_subvp_type(const struct dc_state *state,
+ const struct pipe_ctx *pipe_ctx);
+enum mall_stream_type dc_state_get_stream_subvp_type(const struct dc_state *state,
+ const struct dc_stream_state *stream);
+
+/* Gets the phantom stream if main is provided, gets the main if phantom is provided. */
+struct dc_stream_state *dc_state_get_paired_subvp_stream(const struct dc_state *state,
+ const struct dc_stream_state *stream);
+
+/* allocates a phantom stream or plane and returns a pointer to the object */
+struct dc_stream_state *dc_state_create_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *main_stream);
+struct dc_plane_state *dc_state_create_phantom_plane(struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *main_plane);
+
+/* deallocates a phantom stream or plane */
+void dc_state_release_phantom_stream(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream);
+void dc_state_release_phantom_plane(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *phantom_plane);
+
+/* add/remove a phantom stream to/from the context and generate subvp metadata */
+enum dc_status dc_state_add_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream,
+ struct dc_stream_state *main_stream);
+enum dc_status dc_state_remove_phantom_stream(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream);
+
+bool dc_state_add_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state);
+
+bool dc_state_remove_phantom_plane(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state *phantom_plane,
+ struct dc_state *state);
+
+bool dc_state_rem_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_state *state,
+ bool should_release_planes);
+
+bool dc_state_add_all_phantom_planes_for_stream(
+ const struct dc *dc,
+ struct dc_stream_state *phantom_stream,
+ struct dc_plane_state * const *phantom_planes,
+ int plane_count,
+ struct dc_state *state);
+
+bool dc_state_remove_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state);
+
+void dc_state_release_phantom_streams_and_planes(
+ struct dc *dc,
+ struct dc_state *state);
+
+#endif /* _DC_STATE_PRIV_H_ */
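
Illustrative sketch, not part of the patch: a hedged outline of how the private phantom helpers pair a SubVP phantom stream and plane with a main stream that is assumed to already be part of the state.

	struct dc_stream_state *phantom =
			dc_state_create_phantom_stream(dc, state, main_stream);

	if (phantom) {
		struct dc_plane_state *phantom_plane =
				dc_state_create_phantom_plane(dc, state, main_plane);

		if (phantom_plane &&
		    dc_state_add_phantom_stream(dc, state, phantom, main_stream) == DC_OK)
			dc_state_add_phantom_plane(dc, phantom, phantom_plane, state);
	}

	/* later, all phantoms are torn down in two calls */
	dc_state_remove_phantom_streams_and_planes(dc, state);
	dc_state_release_phantom_streams_and_planes(dc, state);
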
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e61eea6db..a23eebd99 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -38,6 +38,14 @@ struct timing_sync_info {
bool master;
};
+struct mall_stream_config {
+ /* MALL stream config to indicate if the stream is phantom or not.
+ * We will use a phantom stream to indicate that the pipe is phantom.
+ */
+ enum mall_stream_type type;
+ struct dc_stream_state *paired_stream; // master / slave stream
+};
+
struct dc_stream_status {
int primary_otg_inst;
int stream_enc_inst;
@@ -50,6 +58,7 @@ struct dc_stream_status {
struct timing_sync_info timing_sync_info;
struct dc_plane_state *plane_states[MAX_SURFACE_NUM];
bool is_abm_supported;
+ struct mall_stream_config mall_stream_config;
};
enum hubp_dmdata_mode {
@@ -130,7 +139,6 @@ union stream_update_flags {
uint32_t wb_update:1;
uint32_t dsc_changed : 1;
uint32_t mst_bw : 1;
- uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
} bits;
@@ -147,31 +155,6 @@ struct test_pattern {
#define SUBVP_DRR_MARGIN_US 100 // 100us for DRR margin (SubVP + DRR)
-enum mall_stream_type {
- SUBVP_NONE, // subvp not in use
- SUBVP_MAIN, // subvp in use, this stream is main stream
- SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
-};
-
-struct mall_stream_config {
- /* MALL stream config to indicate if the stream is phantom or not.
- * We will use a phantom stream to indicate that the pipe is phantom.
- */
- enum mall_stream_type type;
- struct dc_stream_state *paired_stream; // master / slave stream
-};
-
-/* Temp struct used to save and restore MALL config
- * during validation.
- *
- * TODO: Move MALL config into dc_state instead of stream struct
- * to avoid needing to save/restore.
- */
-struct mall_temp_config {
- struct mall_stream_config mall_stream_config[MAX_PIPES];
- bool is_phantom_plane[MAX_PIPES];
-};
-
struct dc_stream_debug_options {
char force_odm_combine_segments;
};
@@ -301,7 +284,7 @@ struct dc_stream_state {
bool has_non_synchronizable_pclk;
bool vblank_synchronized;
bool fpo_in_use;
- struct mall_stream_config mall_stream_config;
+ bool is_phantom;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -342,7 +325,6 @@ struct dc_stream_update {
struct dc_3dlut *lut3d_func;
struct test_pattern *pending_test_pattern;
- struct dc_crtc_timing_adjust *crtc_timing_adjust;
};
bool dc_is_stream_unchanged(
@@ -415,45 +397,14 @@ bool dc_stream_get_scanoutpos(const struct dc_stream_state *stream,
uint32_t *h_position,
uint32_t *v_position);
-enum dc_status dc_add_stream_to_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream);
-
-enum dc_status dc_remove_stream_from_ctx(
- struct dc *dc,
- struct dc_state *new_ctx,
- struct dc_stream_state *stream);
-
-
-bool dc_add_plane_to_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context);
-
-bool dc_remove_plane_from_context(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state *plane_state,
- struct dc_state *context);
-
-bool dc_rem_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_state *context);
-
-bool dc_add_all_planes_for_stream(
- const struct dc *dc,
- struct dc_stream_state *stream,
- struct dc_plane_state * const *plane_states,
- int plane_count,
- struct dc_state *context);
-
bool dc_stream_add_writeback(struct dc *dc,
struct dc_stream_state *stream,
struct dc_writeback_info *wb_info);
+bool dc_stream_fc_disable_writeback(struct dc *dc,
+ struct dc_stream_state *stream,
+ uint32_t dwb_pipe_inst);
+
bool dc_stream_remove_writeback(struct dc *dc,
struct dc_stream_state *stream,
uint32_t dwb_pipe_inst);
@@ -514,9 +465,6 @@ void update_stream_signal(struct dc_stream_state *stream, struct dc_sink *sink);
void dc_stream_retain(struct dc_stream_state *dc_stream);
void dc_stream_release(struct dc_stream_state *dc_stream);
-struct dc_stream_status *dc_stream_get_status_from_state(
- struct dc_state *state,
- struct dc_stream_state *stream);
struct dc_stream_status *dc_stream_get_status(
struct dc_stream_state *dc_stream);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h
new file mode 100644
index 000000000..7476fd52c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream_priv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DC_STREAM_PRIV_H_
+#define _DC_STREAM_PRIV_H_
+
+#include "dc_stream.h"
+
+bool dc_stream_construct(struct dc_stream_state *stream,
+ struct dc_sink *dc_sink_data);
+void dc_stream_destruct(struct dc_stream_state *stream);
+
+void dc_stream_assign_stream_id(struct dc_stream_state *stream);
+
+#endif // _DC_STREAM_PRIV_H_
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 66d0774be..be2ac5c44 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1018,6 +1018,25 @@ enum replay_coasting_vtotal_type {
PR_COASTING_TYPE_NUM,
};
+enum replay_link_off_frame_count_level {
+ PR_LINK_OFF_FRAME_COUNT_FAIL = 0x0,
+ PR_LINK_OFF_FRAME_COUNT_GOOD = 0x2,
+ PR_LINK_OFF_FRAME_COUNT_BEST = 0x6,
+};
+
+/*
+ * General interface for Replay to pass a 32-bit
+ * variable to DMUB.
+ * The message type indicates which variable is
+ * passed to DMUB.
+ */
+enum replay_FW_Message_type {
+ Replay_Msg_Not_Support = -1,
+ Replay_Set_Timing_Sync_Supported,
+ Replay_Set_Residency_Frameupdate_Timer,
+ Replay_Set_Pseudo_VTotal,
+};
+
union replay_error_status {
struct {
unsigned char STATE_TRANSITION_ERROR :1;
@@ -1029,26 +1048,52 @@ union replay_error_status {
};
struct replay_config {
- bool replay_supported; // Replay feature is supported
- unsigned int replay_power_opt_supported; // Power opt flags that are supported
- bool replay_smu_opt_supported; // SMU optimization is supported
- unsigned int replay_enable_option; // Replay enablement option
- uint32_t debug_flags; // Replay debug flags
- bool replay_timing_sync_supported; // Replay desync is supported
- bool force_disable_desync_error_check; // Replay desync is supported
- bool received_desync_error_hpd; //Replay Received Desync Error HPD.
- union replay_error_status replay_error_status; // Replay error status
-};
-
-/* Replay feature flags */
+ /* Replay feature is supported */
+ bool replay_supported;
+ /* Power opt flags that are supported */
+ unsigned int replay_power_opt_supported;
+ /* SMU optimization is supported */
+ bool replay_smu_opt_supported;
+ /* Replay enablement option */
+ unsigned int replay_enable_option;
+ /* Replay debug flags */
+ uint32_t debug_flags;
+ /* Replay timing sync is supported */
+ bool replay_timing_sync_supported;
+ /* Force-disable the Replay desync error check */
+ bool force_disable_desync_error_check;
+ /* Replay Received Desync Error HPD. */
+ bool received_desync_error_hpd;
+ /* Replay supports fast resync in ultra sleep mode (long vblank) */
+ bool replay_support_fast_resync_in_ultra_sleep_mode;
+ /* Replay error status */
+ union replay_error_status replay_error_status;
+};
+
+/* Replay feature flags */
struct replay_settings {
- struct replay_config config; // Replay configuration
- bool replay_feature_enabled; // Replay feature is ready for activating
- bool replay_allow_active; // Replay is currently active
- unsigned int replay_power_opt_active; // Power opt flags that are activated currently
- bool replay_smu_opt_enable; // SMU optimization is enabled
- uint16_t coasting_vtotal; // Current Coasting vtotal
- uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM]; // Coasting vtotal table
+ /* Replay configuration */
+ struct replay_config config;
+ /* Replay feature is ready for activating */
+ bool replay_feature_enabled;
+ /* Replay is currently active */
+ bool replay_allow_active;
+ /* Replay is allowed to use long vblank */
+ bool replay_allow_long_vblank;
+ /* Power opt flags that are activated currently */
+ unsigned int replay_power_opt_active;
+ /* SMU optimization is enabled */
+ bool replay_smu_opt_enable;
+ /* Current Coasting vtotal */
+ uint32_t coasting_vtotal;
+ /* Coasting vtotal table */
+ uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+ /* Maximum link off frame count */
+ enum replay_link_off_frame_count_level link_off_frame_count_level;
+ /* Replay pseudo vtotal used with ABM + IPS on full-screen video, which can improve IPS residency */
+ uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+ /* Replay last pseudo vtotal set to DMUB */
+ uint16_t last_pseudo_vtotal;
};
/* To split out "global" and "per-panel" config settings.
@@ -1125,4 +1170,9 @@ enum dc_hpd_enable_select {
HPD_EN_FOR_SECONDARY_EDP_ONLY,
};
+enum mall_stream_type {
+ SUBVP_NONE, // subvp not in use
+ SUBVP_MAIN, // subvp in use, this stream is main stream
+ SUBVP_PHANTOM, // subvp in use, this stream is a phantom stream
+};
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
index 874b132fe..a60067763 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c
@@ -135,7 +135,7 @@ static void dmcu_set_backlight_level(
0, 1, 80000);
}
-static void dce_abm_init(struct abm *abm, uint32_t backlight)
+static void dce_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
{
struct dce_abm *abm_dce = TO_DCE_ABM(abm);
@@ -162,7 +162,7 @@ static void dce_abm_init(struct abm *abm, uint32_t backlight)
BL1_PWM_TARGET_ABM_LEVEL, backlight);
REG_UPDATE(BL1_PWM_USER_LEVEL,
- BL1_PWM_USER_LEVEL, backlight);
+ BL1_PWM_USER_LEVEL, user_level);
REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 930fd929e..ccc154b02 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -57,18 +57,22 @@ static unsigned int abm_feature_support(struct abm *abm, unsigned int panel_inst
return ret;
}
-static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight)
+static void dmub_abm_init_ex(struct abm *abm, uint32_t backlight, uint32_t user_level)
{
- dmub_abm_init(abm, backlight);
+ dmub_abm_init(abm, backlight, user_level);
}
static unsigned int dmub_abm_get_current_backlight_ex(struct abm *abm)
{
+ dc_allow_idle_optimizations(abm->ctx->dc, false);
+
return dmub_abm_get_current_backlight(abm);
}
static unsigned int dmub_abm_get_target_backlight_ex(struct abm *abm)
{
+ dc_allow_idle_optimizations(abm->ctx->dc, false);
+
return dmub_abm_get_target_backlight(abm);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
index 4cff36351..f9d6a1811 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.c
@@ -79,7 +79,7 @@ static void dmub_abm_enable_fractional_pwm(struct dc_context *dc)
dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
-void dmub_abm_init(struct abm *abm, uint32_t backlight)
+void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level)
{
struct dce_abm *dce_abm = TO_DMUB_ABM(abm);
@@ -106,7 +106,7 @@ void dmub_abm_init(struct abm *abm, uint32_t backlight)
BL1_PWM_TARGET_ABM_LEVEL, backlight);
REG_UPDATE(BL1_PWM_USER_LEVEL,
- BL1_PWM_USER_LEVEL, backlight);
+ BL1_PWM_USER_LEVEL, user_level);
REG_UPDATE_2(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES,
ABM1_LS_MIN_PIXEL_VALUE_THRES, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
index 07ea6c8d4..761685e5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm_lcd.h
@@ -30,7 +30,7 @@
struct abm_save_restore;
-void dmub_abm_init(struct abm *abm, uint32_t backlight);
+void dmub_abm_init(struct abm *abm, uint32_t backlight, uint32_t user_level);
bool dmub_abm_set_level(struct abm *abm, uint32_t level, uint8_t panel_mask);
unsigned int dmub_abm_get_current_backlight(struct abm *abm);
unsigned int dmub_abm_get_target_backlight(struct abm *abm);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 28149e53c..38e4797e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -258,13 +258,97 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
*residency = 0;
}
+/*
+ * Set REPLAY power optimization flags and coasting vtotal.
+ */
+static void dmub_replay_set_power_opt_and_coasting_vtotal(struct dmub_replay *dmub,
+ unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.replay_set_power_opt_and_coasting_vtotal.header.type = DMUB_CMD__REPLAY;
+ cmd.replay_set_power_opt_and_coasting_vtotal.header.sub_type =
+ DMUB_CMD__REPLAY_SET_POWER_OPT_AND_COASTING_VTOTAL;
+ cmd.replay_set_power_opt_and_coasting_vtotal.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_power_opt_and_coasting_vtotal);
+ cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.power_opt = power_opt;
+ cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_power_opt_data.panel_inst = panel_inst;
+ cmd.replay_set_power_opt_and_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
+
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
+/*
+ * Send a Replay general command to DMUB.
+ */
+static void dmub_replay_send_cmd(struct dmub_replay *dmub,
+ enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *ctx = NULL;
+
+ if (dmub == NULL || cmd_element == NULL)
+ return;
+
+ ctx = dmub->ctx;
+ if (ctx == NULL || msg == Replay_Msg_Not_Support)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+ //Header
+ cmd.replay_set_timing_sync.header.type = DMUB_CMD__REPLAY;
+
+ switch (msg) {
+ case Replay_Set_Timing_Sync_Supported:
+ //Header
+ cmd.replay_set_timing_sync.header.sub_type =
+ DMUB_CMD__REPLAY_SET_TIMING_SYNC_SUPPORTED;
+ cmd.replay_set_timing_sync.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_timing_sync);
+ //Cmd Body
+ cmd.replay_set_timing_sync.replay_set_timing_sync_data.panel_inst =
+ cmd_element->sync_data.panel_inst;
+ cmd.replay_set_timing_sync.replay_set_timing_sync_data.timing_sync_supported =
+ cmd_element->sync_data.timing_sync_supported;
+ break;
+ case Replay_Set_Residency_Frameupdate_Timer:
+ //Header
+ cmd.replay_set_frameupdate_timer.header.sub_type =
+ DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER;
+ cmd.replay_set_frameupdate_timer.header.payload_bytes =
+ sizeof(struct dmub_rb_cmd_replay_set_frameupdate_timer);
+ //Cmd Body
+ cmd.replay_set_frameupdate_timer.data.panel_inst =
+ cmd_element->panel_inst;
+ cmd.replay_set_frameupdate_timer.data.enable =
+ cmd_element->timer_data.enable;
+ cmd.replay_set_frameupdate_timer.data.frameupdate_count =
+ cmd_element->timer_data.frameupdate_count;
+ break;
+ case Replay_Msg_Not_Support:
+ default:
+ return;
+ }
+
+ dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
static const struct dmub_replay_funcs replay_funcs = {
- .replay_copy_settings = dmub_replay_copy_settings,
- .replay_enable = dmub_replay_enable,
- .replay_get_state = dmub_replay_get_state,
- .replay_set_power_opt = dmub_replay_set_power_opt,
- .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal,
- .replay_residency = dmub_replay_residency,
+ .replay_copy_settings = dmub_replay_copy_settings,
+ .replay_enable = dmub_replay_enable,
+ .replay_get_state = dmub_replay_get_state,
+ .replay_set_power_opt = dmub_replay_set_power_opt,
+ .replay_set_coasting_vtotal = dmub_replay_set_coasting_vtotal,
+ .replay_residency = dmub_replay_residency,
+ .replay_set_power_opt_and_coasting_vtotal = dmub_replay_set_power_opt_and_coasting_vtotal,
+ .replay_send_cmd = dmub_replay_send_cmd,
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
index e8385bbf5..3613aff99 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.h
@@ -45,10 +45,14 @@ struct dmub_replay_funcs {
struct replay_context *replay_context, uint8_t panel_inst);
void (*replay_set_power_opt)(struct dmub_replay *dmub, unsigned int power_opt,
uint8_t panel_inst);
+ void (*replay_send_cmd)(struct dmub_replay *dmub,
+ enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_element);
void (*replay_set_coasting_vtotal)(struct dmub_replay *dmub, uint16_t coasting_vtotal,
uint8_t panel_inst);
void (*replay_residency)(struct dmub_replay *dmub,
uint8_t panel_inst, uint32_t *residency, const bool is_start, const bool is_alpm);
+ void (*replay_set_power_opt_and_coasting_vtotal)(struct dmub_replay *dmub,
+ unsigned int power_opt, uint8_t panel_inst, uint16_t coasting_vtotal);
};
struct dmub_replay *dmub_replay_create(struct dc_context *ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/Makefile b/drivers/gpu/drm/amd/display/dc/dce100/Makefile
deleted file mode 100644
index 0d2f6bbf7..000000000
--- a/drivers/gpu/drm/amd/display/dc/dce100/Makefile
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-# Copyright 2017 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-#
-# Makefile for the 'controller' sub-component of DAL.
-# It provides the control and status of HW CRTC block.
-
-CFLAGS_$(AMDDALPATH)/dc/dce100/dce100_resource.o = $(call cc-disable-warning, override-init)
-
-DCE100 = dce100_resource.o
-
-AMD_DAL_DCE100 = $(addprefix $(AMDDALPATH)/dc/dce100/,$(DCE100))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCE100)
-
-
-###############################################################################
-# DCE 10x
-###############################################################################
-ifdef 0#CONFIG_DRM_AMD_DC_DCE11_0
-TG_DCE100 = dce100_resource.o
-
-AMD_DAL_TG_DCE100 = $(addprefix \
- $(AMDDALPATH)/dc/dce100/,$(TG_DCE100))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_TG_DCE100)
-endif
-
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index 695a50ed5..c307f040e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,11 +23,11 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init
DCE110 = dce110_timing_generator.o \
-dce110_compressor.o dce110_resource.o \
-dce110_opp_regamma_v.o dce110_opp_csc_v.o dce110_timing_generator_v.o \
+dce110_compressor.o dce110_opp_regamma_v.o \
+dce110_opp_csc_v.o dce110_timing_generator_v.o \
dce110_mem_input_v.o dce110_opp_v.o dce110_transform_v.o
AMD_DAL_DCE110 = $(addprefix $(AMDDALPATH)/dc/dce110/,$(DCE110))
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index e846ef58c..683866797 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,10 +23,9 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init
-DCE112 = dce112_compressor.o \
-dce112_resource.o
+DCE112 = dce112_compressor.o
AMD_DAL_DCE112 = $(addprefix $(AMDDALPATH)/dc/dce112/,$(DCE112))
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 097cf407a..8f508e662 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,9 +24,9 @@
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init
-DCE120 = dce120_resource.o dce120_timing_generator.o \
+DCE120 = dce120_timing_generator.o
AMD_DAL_DCE120 = $(addprefix $(AMDDALPATH)/dc/dce120/,$(DCE120))
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index fee331acc..eede83ad9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 93dd68c31..fba189d26 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,10 +23,9 @@
# Makefile for the 'controller' sub-component of DAL.
# It provides the control and status of HW CRTC block.
-CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init
-DCE80 = dce80_timing_generator.o \
- dce80_resource.o
+DCE80 = dce80_timing_generator.o
AMD_DAL_DCE80 = $(addprefix $(AMDDALPATH)/dc/dce80/,$(DCE80))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
index 2d2007c3e..ae6a131be 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/Makefile
@@ -22,9 +22,9 @@
#
# Makefile for DCN.
-DCN10 = dcn10_init.o dcn10_resource.o dcn10_ipp.o \
+DCN10 = dcn10_ipp.o \
dcn10_hw_sequencer_debug.o \
- dcn10_dpp.o dcn10_opp.o dcn10_optc.o \
+ dcn10_dpp.o dcn10_opp.o \
dcn10_hubp.o dcn10_mpc.o \
dcn10_dpp_dscl.o dcn10_dpp_cm.o dcn10_cm_common.o \
dcn10_hubbub.o dcn10_stream_encoder.o dcn10_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
index 92fdab731..9033b39e0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer_debug.c
@@ -32,7 +32,7 @@
#include "dce/dce_hwseq.h"
#include "abm.h"
#include "dmcu.h"
-#include "dcn10_optc.h"
+#include "dcn10/dcn10_optc.h"
#include "dcn10/dcn10_dpp.h"
#include "dcn10/dcn10_mpc.h"
#include "timing_generator.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 0dec57679..86bfed5de 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -377,6 +377,7 @@ static const struct opp_funcs dcn10_opp_funcs = {
.opp_set_disp_pattern_generator = NULL,
.opp_program_dpg_dimensions = NULL,
.dpg_is_blanked = NULL,
+ .dpg_is_pending = NULL,
.opp_destroy = opp1_destroy
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
index d7dc9696a..3dae3943b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/Makefile
@@ -2,13 +2,11 @@
#
# Makefile for DCN.
-DCN20 = dcn20_resource.o dcn20_init.o dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
- dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_optc.o dcn20_mmhubbub.o \
+DCN20 = dcn20_dpp.o dcn20_dpp_cm.o dcn20_hubp.o \
+ dcn20_mpc.o dcn20_opp.o dcn20_hubbub.o dcn20_mmhubbub.o \
dcn20_stream_encoder.o dcn20_link_encoder.o dcn20_dccg.o \
dcn20_vmid.o dcn20_dwb.o dcn20_dwb_scl.o
-DCN20 += dcn20_dsc.o
-
AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN20)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index ab6d09c6f..ef5c22f41 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -291,7 +291,43 @@
type SYMCLKB_FE_SRC_SEL;\
type SYMCLKC_FE_SRC_SEL;\
type SYMCLKD_FE_SRC_SEL;\
- type SYMCLKE_FE_SRC_SEL;
+ type SYMCLKE_FE_SRC_SEL;\
+ type DTBCLK_P0_GATE_DISABLE;\
+ type DTBCLK_P1_GATE_DISABLE;\
+ type DTBCLK_P2_GATE_DISABLE;\
+ type DTBCLK_P3_GATE_DISABLE;\
+ type DSCCLK0_ROOT_GATE_DISABLE;\
+ type DSCCLK1_ROOT_GATE_DISABLE;\
+ type DSCCLK2_ROOT_GATE_DISABLE;\
+ type DSCCLK3_ROOT_GATE_DISABLE;\
+ type SYMCLKA_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKB_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKC_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKD_FE_ROOT_GATE_DISABLE;\
+ type SYMCLKE_FE_ROOT_GATE_DISABLE;\
+ type DPPCLK0_ROOT_GATE_DISABLE;\
+ type DPPCLK1_ROOT_GATE_DISABLE;\
+ type DPPCLK2_ROOT_GATE_DISABLE;\
+ type DPPCLK3_ROOT_GATE_DISABLE;\
+ type HDMISTREAMCLK0_ROOT_GATE_DISABLE;\
+ type SYMCLKA_ROOT_GATE_DISABLE;\
+ type SYMCLKB_ROOT_GATE_DISABLE;\
+ type SYMCLKC_ROOT_GATE_DISABLE;\
+ type SYMCLKD_ROOT_GATE_DISABLE;\
+ type SYMCLKE_ROOT_GATE_DISABLE;\
+ type PHYA_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYB_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYC_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYD_REFCLK_ROOT_GATE_DISABLE;\
+ type PHYE_REFCLK_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK0_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK1_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK2_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK3_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK0_GATE_DISABLE;\
+ type DPSTREAMCLK1_GATE_DISABLE;\
+ type DPSTREAMCLK2_GATE_DISABLE;\
+ type DPSTREAMCLK3_GATE_DISABLE;\
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 0784d0198..fbf1b6370 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
(double_buffer_pending == 0);
}
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+ struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+ uint32_t double_buffer_pending;
+ uint32_t dpg_en;
+
+ REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+ REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+ return (dpg_en == 1 && double_buffer_pending == 1);
+}
+
void opp2_program_left_edge_extra_pixel (
struct output_pixel_processor *opp,
bool count)
@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 3ab221bdd..8f186abd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions(
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
void opp2_dpg_set_blank_color(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
index 3a41a97b0..2b0b4f32e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: MIT
#
# Makefile for DCN.
-DCN201 = dcn201_init.o dcn201_resource.o \
- dcn201_hubbub.o\
- dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_optc.o dcn201_dpp.o \
+DCN201 = dcn201_hubbub.o\
+ dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_dpp.o \
dcn201_dccg.o dcn201_link_encoder.o
AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
index 8e77db46a..6a71ba3df 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = {
.opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
.opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
.dpg_is_blanked = opp2_dpg_is_blanked,
+ .dpg_is_pending = opp2_dpg_is_pending,
.opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
.opp_destroy = opp1_destroy,
.opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
index ce1be0afa..ca92f5c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile
@@ -2,7 +2,7 @@
#
# Makefile for DCN21.
-DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \
+DCN21 = dcn21_hubp.o dcn21_hubbub.o \
dcn21_link_encoder.o dcn21_dccg.o
AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
index af4d2065d..b5b2aa3b3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile
@@ -23,12 +23,9 @@
#
#
-DCN30 := \
- dcn30_init.o \
- dcn30_hubbub.o \
+DCN30 := dcn30_hubbub.o \
dcn30_hubp.o \
dcn30_dpp.o \
- dcn30_optc.o \
dcn30_dccg.o \
dcn30_mpc.o dcn30_vpg.o \
dcn30_afmt.o \
@@ -38,7 +35,6 @@ DCN30 := \
dcn30_dwb_cm.o \
dcn30_cm_common.o \
dcn30_mmhubbub.o \
- dcn30_resource.o \
dcn30_dio_link_encoder.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index e43f77c11..5f97a868a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -56,16 +56,13 @@ static void dpp3_enable_cm_block(
static enum dc_lut_mode dpp30_get_gamcor_current(struct dpp *dpp_base)
{
- enum dc_lut_mode mode;
+ enum dc_lut_mode mode = LUT_BYPASS;
uint32_t state_mode;
uint32_t lut_mode;
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &state_mode);
- if (state_mode == 0)
- mode = LUT_BYPASS;
-
if (state_mode == 2) {//Programmable RAM LUT
REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_SELECT_CURRENT, &lut_mode);
if (lut_mode == 0)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
index 0d98918bf..1b9d9495f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.c
@@ -130,6 +130,28 @@ bool dwb3_disable(struct dwbc *dwbc)
return true;
}
+void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable)
+{
+ struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
+ unsigned int pre_locked;
+
+ REG_GET(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, &pre_locked);
+
+ /* Lock DWB registers */
+ if (pre_locked == 0)
+ REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 1);
+
+ /* Disable FC */
+ REG_UPDATE(FC_MODE_CTRL, FC_FRAME_CAPTURE_EN, enable);
+
+ /* Unlock DWB registers */
+ if (pre_locked == 0)
+ REG_UPDATE(DWB_UPDATE_CTRL, DWB_UPDATE_LOCK, 0);
+
+ DC_LOG_DWB("%s dwb3 set fc_enable = %d at inst = %d", __func__, enable, dwbc->inst);
+}
+
+
bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params)
{
struct dcn30_dwbc *dwbc30 = TO_DCN30_DWBC(dwbc);
@@ -226,6 +248,7 @@ static const struct dwbc_funcs dcn30_dwbc_funcs = {
.disable = dwb3_disable,
.update = dwb3_update,
.is_enabled = dwb3_is_enabled,
+ .set_fc_enable = dwb3_set_fc_enable,
.set_stereo = dwb3_set_stereo,
.set_new_content = dwb3_set_new_content,
.dwb_program_output_csc = NULL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
index a5d1b81e7..332634b76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb.h
@@ -877,6 +877,8 @@ bool dwb3_update(struct dwbc *dwbc, struct dc_dwb_params *params);
bool dwb3_is_enabled(struct dwbc *dwbc);
+void dwb3_set_fc_enable(struct dwbc *dwbc, enum dwb_frame_capture_enable enable);
+
void dwb3_set_stereo(struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
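
Illustrative sketch, not part of the patch: toggling frame capture through the new hook. dwbc is assumed to come from the resource pool, and DWB_FRAME_CAPTURE_DISABLE is assumed from the existing enum dwb_frame_capture_enable.

	if (dwbc->funcs->set_fc_enable)
		dwbc->funcs->set_fc_enable(dwbc, DWB_FRAME_CAPTURE_DISABLE);
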
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
index 701c7d8bc..03a50c32f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dwb_cm.c
@@ -243,6 +243,9 @@ static bool dwb3_program_ogam_lut(
return false;
}
+ if (params->hw_points_num == 0)
+ return false;
+
REG_SET(DWB_OGAM_CONTROL, 0, DWB_OGAM_MODE, 2);
current_mode = dwb3_get_ogam_current(dwbc30);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 30fbc5e06..d241f665e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -10,9 +10,8 @@
#
# Makefile for dcn30.
-DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
- dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o \
- dcn301_optc.o
+DCN301 = dcn301_dccg.o \
+ dcn301_dio_link_encoder.o dcn301_panel_cntl.o dcn301_hubbub.o
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
deleted file mode 100644
index 95b66baf3..000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile
+++ /dev/null
@@ -1,12 +0,0 @@
-#
-# (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved
-#
-# Authors: AMD
-#
-# Makefile for dcn302.
-
-DCN3_02 = dcn302_init.o dcn302_resource.o
-
-AMD_DAL_DCN3_02 = $(addprefix $(AMDDALPATH)/dc/dcn302/,$(DCN3_02))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN3_02)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile
index d7b3ad780..a954e316a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/Makefile
@@ -6,7 +6,7 @@
#
# Makefile for dcn303.
-DCN3_03 = dcn303_init.o dcn303_resource.o
+DCN3_03 = dcn303_init.o
AMD_DAL_DCN3_03 = $(addprefix $(AMDDALPATH)/dc/dcn303/,$(DCN3_03))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index 96e45c9ef..5d93ac16c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -10,8 +10,8 @@
#
# Makefile for dcn31.
-DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_init.o dcn31_hubp.o \
- dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
+DCN31 = dcn31_hubbub.o dcn31_hubp.o \
+ dcn31_dccg.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
dcn31_afmt.o dcn31_vpg.o
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
index 72456debb..b134ab05a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn314/Makefile
@@ -10,8 +10,7 @@
#
# Makefile for dcn314.
-DCN314 = dcn314_resource.o dcn314_init.o \
- dcn314_dio_stream_encoder.o dcn314_dccg.o dcn314_optc.o
+DCN314 = dcn314_dio_stream_encoder.o dcn314_dccg.o
AMD_DAL_DCN314 = $(addprefix $(AMDDALPATH)/dc/dcn314/,$(DCN314))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile b/drivers/gpu/drm/amd/display/dc/dcn315/Makefile
deleted file mode 100644
index 59381d248..000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn315/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Copyright © 2021 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dcn315.
-
-DCN315 = dcn315_resource.o
-
-AMD_DAL_DCN315 = $(addprefix $(AMDDALPATH)/dc/dcn315/,$(DCN315))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN315)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile b/drivers/gpu/drm/amd/display/dc/dcn316/Makefile
deleted file mode 100644
index 819d44a94..000000000
--- a/drivers/gpu/drm/amd/display/dc/dcn316/Makefile
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-# Copyright 2021 Advanced Micro Devices, Inc.
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the "Software"),
-# to deal in the Software without restriction, including without limitation
-# the rights to use, copy, modify, merge, publish, distribute, sublicense,
-# and/or sell copies of the Software, and to permit persons to whom the
-# Software is furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
-# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
-# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
-# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
-# OTHER DEALINGS IN THE SOFTWARE.
-#
-# Authors: AMD
-#
-# Makefile for dcn316.
-
-DCN316 = dcn316_resource.o
-
-AMD_DAL_DCN316 = $(addprefix $(AMDDALPATH)/dc/dcn316/,$(DCN316))
-
-AMD_DISPLAY_FILES += $(AMD_DAL_DCN316)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
index 8bb251307..5314770ff 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/Makefile
@@ -10,10 +10,10 @@
#
# Makefile for dcn32.
-DCN32 = dcn32_resource.o dcn32_hubbub.o dcn32_init.o dcn32_dccg.o \
- dcn32_dccg.o dcn32_optc.o dcn32_mmhubbub.o dcn32_hubp.o dcn32_dpp.o \
- dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_hpo_dp_link_encoder.o \
- dcn32_resource_helpers.o dcn32_mpc.o
+DCN32 = dcn32_hubbub.o dcn32_dccg.o \
+ dcn32_mmhubbub.o dcn32_dpp.o dcn32_hubp.o dcn32_mpc.o \
+ dcn32_dio_stream_encoder.o dcn32_dio_link_encoder.o dcn32_resource_helpers.o \
+ dcn32_hpo_dp_link_encoder.o
AMD_DAL_DCN32 = $(addprefix $(AMDDALPATH)/dc/dcn32/,$(DCN32))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index d761b0df2..8a0460e86 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -34,6 +34,7 @@
#include "dc_bios_types.h"
#include "link_enc_cfg.h"
+#include "dc_dmub_srv.h"
#include "gpio_service_interface.h"
#ifndef MIN
@@ -61,6 +62,38 @@
#define AUX_REG_WRITE(reg_name, val) \
dm_write_reg(CTX, AUX_REG(reg_name), val)
+static uint8_t phy_id_from_transmitter(enum transmitter t)
+{
+ uint8_t phy_id;
+
+ switch (t) {
+ case TRANSMITTER_UNIPHY_A:
+ phy_id = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ phy_id = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ phy_id = 2;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ phy_id = 3;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ phy_id = 4;
+ break;
+ case TRANSMITTER_UNIPHY_F:
+ phy_id = 5;
+ break;
+ case TRANSMITTER_UNIPHY_G:
+ phy_id = 6;
+ break;
+ default:
+ phy_id = 0;
+ break;
+ }
+ return phy_id;
+}
void enc32_hw_init(struct link_encoder *enc)
{
@@ -117,38 +150,50 @@ void dcn32_link_encoder_enable_dp_output(
}
}
-static bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+static bool query_dp_alt_from_dmub(struct link_encoder *enc,
+ union dmub_rb_cmd *cmd)
{
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t dp_alt_mode_disable = 0;
- bool is_usb_c_alt_mode = false;
- if (enc->features.flags.bits.DP_IS_USB_C) {
- /* if value == 1 alt mode is disabled, otherwise it is enabled */
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
- is_usb_c_alt_mode = (dp_alt_mode_disable == 0);
- }
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->query_dp_alt.header.type = DMUB_CMD__VBIOS;
+ cmd->query_dp_alt.header.sub_type =
+ DMUB_CMD__VBIOS_TRANSMITTER_QUERY_DP_ALT;
+ cmd->query_dp_alt.header.payload_bytes = sizeof(cmd->query_dp_alt.data);
+ cmd->query_dp_alt.data.phy_id = phy_id_from_transmitter(enc10->base.transmitter);
+
+ if (!dc_wake_and_execute_dmub_cmd(enc->ctx, cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return false;
- return is_usb_c_alt_mode;
+ return true;
}
-static void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return false;
+
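+ /* is_dp_alt_disable == 0 means DP alt mode is currently enabled */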
+ return (cmd.query_dp_alt.data.is_dp_alt_disable == 0);
+}
+
+void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
struct dc_link_settings *link_settings)
{
- struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
- uint32_t is_in_usb_c_dp4_mode = 0;
+ union dmub_rb_cmd cmd;
dcn10_link_encoder_get_max_link_cap(enc, link_settings);
- /* in usb c dp2 mode, max lane count is 2 */
- if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
- REG_GET(RDPCSPIPE_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &is_in_usb_c_dp4_mode);
- if (!is_in_usb_c_dp4_mode)
- link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
- }
+ if (!query_dp_alt_from_dmub(enc, &cmd))
+ return;
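+
+ /* in USB-C DP2 mode, the maximum lane count is 2 */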
+ if (cmd.query_dp_alt.data.is_usb &&
+ cmd.query_dp_alt.data.is_dp4 == 0)
+ link_settings->lane_count = MIN(LANE_COUNT_TWO, link_settings->lane_count);
}
+
static const struct link_encoder_funcs dcn32_link_enc_funcs = {
.read_state = link_enc2_read_state,
.validate_output_with_stream =
@@ -203,12 +248,12 @@ void dcn32_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
enc10->base.features = *enc_features;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.transmitter = init_data->transmitter;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
index bbcfce06b..2d5f25290 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.h
@@ -53,4 +53,9 @@ void dcn32_link_encoder_enable_dp_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source);
+bool dcn32_link_encoder_is_in_alt_mode(struct link_encoder *enc);
+
+void dcn32_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings);
+
#endif /* __DC_LINK_ENCODER__DCN32_H__ */
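
Illustrative sketch, not part of the patch: with the DMUB-backed queries above, callers still go through the common link_encoder hooks. enc is assumed to be a link encoder built by dcn32_link_encoder_construct().

	struct dc_link_settings settings = {0};

	dcn32_link_encoder_get_max_link_cap(enc, &settings);
	if (enc->funcs->is_in_alt_mode && enc->funcs->is_in_alt_mode(enc)) {
		/* USB-C DP alt mode is active; lane count may already be capped at two */
	}
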
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index 3279b6102..e408e859b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -71,12 +71,13 @@ void mpc32_power_on_blnd_lut(
{
struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+ REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0, MPCC_MCM_1DLUT_MEM_PWR_DIS, power_on);
+
if (mpc->ctx->dc->debug.enable_mem_low_power.bits.cm) {
if (power_on) {
REG_UPDATE(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_FORCE, 0);
REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_1DLUT_MEM_PWR_STATE, 0, 1, 5);
} else if (!mpc->ctx->dc->debug.disable_mem_low_power) {
- ASSERT(false);
/* TODO: change to mpc
* dpp_base->ctx->dc->optimized_required = true;
* dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 1f8942849..f98def6c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -24,10 +24,11 @@
*/
// header file of functions being implemented
-#include "dcn32_resource.h"
+#include "dcn32/dcn32_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dml/dcn32/display_mode_vba_util_32.h"
#include "dml/dcn32/dcn32_fpu.h"
+#include "dc_state_priv.h"
static bool is_dual_plane(enum surface_pixel_format format)
{
@@ -190,7 +191,7 @@ bool dcn32_subvp_in_use(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE)
+ if (dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE)
return true;
}
return false;
@@ -264,18 +265,17 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint
// Do not override if a stream has multiple planes
for (i = 0; i < context->stream_count; i++) {
- if (context->stream_status[i].plane_count > 1) {
+ if (context->stream_status[i].plane_count > 1)
return;
- }
- if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM) {
+
+ if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
stream_count++;
- }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
if (dcn32_allow_subvp_high_refresh_rate(dc, context, pipe_ctx)) {
if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
@@ -290,7 +290,7 @@ static void override_det_for_subvp(struct dc *dc, struct dc_state *context, uint
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->plane_state && pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe_ctx->stream && pipe_ctx->plane_state && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
if (pipe_ctx->stream->timing.v_addressable == 1080 && pipe_ctx->stream->timing.h_addressable == 1920) {
if (pipe_segments[i] > 4)
pipe_segments[i] = 4;
@@ -337,14 +337,14 @@ void dcn32_determine_det_override(struct dc *dc,
for (i = 0; i < context->stream_count; i++) {
/* Don't count SubVP streams for DET allocation */
- if (context->streams[i]->mall_stream_config.type != SUBVP_PHANTOM)
+ if (dc_state_get_stream_subvp_type(context, context->streams[i]) != SUBVP_PHANTOM)
stream_count++;
}
if (stream_count > 0) {
stream_segments = 18 / stream_count;
for (i = 0; i < context->stream_count; i++) {
- if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+ if (dc_state_get_stream_subvp_type(context, context->streams[i]) == SUBVP_PHANTOM)
continue;
if (context->stream_status[i].plane_count > 0)
@@ -430,71 +430,6 @@ void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
dcn32_determine_det_override(dc, context, pipes);
}
-/**
- * dcn32_save_mall_state(): Save MALL (SubVP) state for fast validation cases
- *
- * This function saves the MALL (SubVP) case for fast validation cases. For fast validation,
- * there are situations where a shallow copy of the dc->current_state is created for the
- * validation. In this case we want to save and restore the mall config because we always
- * teardown subvp at the beginning of validation (and don't attempt to add it back if it's
- * fast validation). If we don't restore the subvp config in cases of fast validation +
- * shallow copy of the dc->current_state, the dc->current_state will have a partially
- * removed subvp state when we did not intend to remove it.
- *
- * NOTE: This function ONLY works if the streams are not moved to a different pipe in the
- * validation. We don't expect this to happen in fast_validation=1 cases.
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed
- * @temp_config: struct used to cache the existing MALL state
- *
- * Return: void
- */
-void dcn32_save_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config)
-{
- uint32_t i;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream)
- temp_config->mall_stream_config[i] = pipe->stream->mall_stream_config;
-
- if (pipe->plane_state)
- temp_config->is_phantom_plane[i] = pipe->plane_state->is_phantom;
- }
-}
-
-/**
- * dcn32_restore_mall_state(): Restore MALL (SubVP) state for fast validation cases
- *
- * Restore the MALL state based on the previously saved state from dcn32_save_mall_state
- *
- * @dc: Current DC state
- * @context: New DC state to be programmed, restore MALL state into here
- * @temp_config: struct that has the cached MALL state
- *
- * Return: void
- */
-void dcn32_restore_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config)
-{
- uint32_t i;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (pipe->stream)
- pipe->stream->mall_stream_config = temp_config->mall_stream_config[i];
-
- if (pipe->plane_state)
- pipe->plane_state->is_phantom = temp_config->is_phantom_plane[i];
- }
-}
-
#define MAX_STRETCHED_V_BLANK 1000 // in micro-seconds (must match the value in FW)
/*
* Scaling factor for v_blank stretch calculations considering timing in
@@ -589,13 +524,14 @@ static int get_refresh_rate(struct dc_stream_state *fpo_candidate_stream)
*
* Return: Pointer to FPO stream candidate if config can support FPO, otherwise NULL
*/
-struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context)
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context)
{
int refresh_rate = 0;
const int minimum_refreshrate_supported = 120;
struct dc_stream_state *fpo_candidate_stream = NULL;
bool is_fpo_vactive = false;
uint32_t fpo_vactive_margin_us = 0;
+ struct dc_stream_status *fpo_stream_status = NULL;
if (context == NULL)
return NULL;
@@ -618,16 +554,28 @@ struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stre
DC_FP_START();
dcn32_assign_fpo_vactive_candidate(dc, context, &fpo_candidate_stream);
DC_FP_END();
-
+ if (fpo_candidate_stream)
+ fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
DC_FP_START();
is_fpo_vactive = dcn32_find_vactive_pipe(dc, context, dc->debug.fpo_vactive_min_active_margin_us);
DC_FP_END();
if (!is_fpo_vactive || dc->debug.disable_fpo_vactive)
return NULL;
- } else
+ } else {
fpo_candidate_stream = context->streams[0];
+ if (fpo_candidate_stream)
+ fpo_stream_status = dc_state_get_stream_status(context, fpo_candidate_stream);
+ }
- if (!fpo_candidate_stream)
+ /* In DCN32/321, FPO uses per-pipe P-State force.
+ * If there's no planes, HUBP is power gated and
+ * therefore programming UCLK_PSTATE_FORCE does
+ * nothing (P-State will always be asserted naturally
+ * on a pipe that has HUBP power gated). Therefore we
+ * only want to enable FPO if the FPO pipe has both
+ * a stream and a plane.
+ */
+ if (!fpo_candidate_stream || !fpo_stream_status || fpo_stream_status->plane_count == 0)
return NULL;
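Note: the comment above captures the key constraint here. On DCN32/321 FPO relies on a per-pipe UCLK P-State force, which has no effect once HUBP is power gated. A minimal sketch of the eligibility rule, using a hypothetical helper name that does not exist in the driver:

    /* Hypothetical helper, illustration only: an FPO candidate needs a stream
     * with at least one plane, otherwise HUBP is power gated and programming
     * UCLK_PSTATE_FORCE does nothing.
     */
    static bool fpo_candidate_eligible(const struct dc_stream_state *stream,
                                       const struct dc_stream_status *status)
    {
        return stream != NULL && status != NULL && status->plane_count > 0;
    }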
if (fpo_candidate_stream->sink->edid_caps.panel_patch.disable_fams)
@@ -666,6 +614,30 @@ bool dcn32_check_native_scaling_for_res(struct pipe_ctx *pipe, unsigned int widt
}
/**
+ * disallow_subvp_in_active_plus_blank() - Determine disallowed SubVP + DRR/VBLANK configs
+ *
+ * @pipe: subvp pipe to be used for the subvp + drr/vblank config
+ *
+ * Since subvp is being enabled on more configs (such as 1080p60), we want
+ * to explicitly block any configs that we don't want to enable. We do not
+ * want to enable any 1080p60 (SubVP) + drr / vblank configs since these
+ * are already covered by FPO.
+ *
+ * Return: True if disallowed, false otherwise
+ */
+static bool disallow_subvp_in_active_plus_blank(struct pipe_ctx *pipe)
+{
+ bool disallow = false;
+
+ if (resource_is_pipe_type(pipe, OPP_HEAD) &&
+ resource_is_pipe_type(pipe, DPP_PIPE)) {
+ if (pipe->stream->timing.v_addressable == 1080 && pipe->stream->timing.h_addressable == 1920)
+ disallow = true;
+ }
+ return disallow;
+}
+
+/**
* dcn32_subvp_drr_admissable() - Determine if SubVP + DRR config is admissible
*
* @dc: Current DC state
@@ -688,21 +660,24 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
bool drr_pipe_found = false;
bool drr_psr_capable = false;
uint64_t refresh_rate = 0;
+ bool subvp_disallow = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (resource_is_pipe_type(pipe, OPP_HEAD) &&
resource_is_pipe_type(pipe, DPP_PIPE)) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe_mall_type == SUBVP_MAIN) {
subvp_count++;
+ subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
}
- if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (pipe_mall_type == SUBVP_NONE) {
non_subvp_pipes++;
drr_psr_capable = (drr_psr_capable || dcn32_is_psr_capable(pipe));
if (pipe->stream->ignore_msa_timing_param &&
@@ -713,7 +688,7 @@ bool dcn32_subvp_drr_admissable(struct dc *dc, struct dc_state *context)
}
}
- if (subvp_count == 1 && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
+ if (subvp_count == 1 && !subvp_disallow && non_subvp_pipes == 1 && drr_pipe_found && !drr_psr_capable &&
((uint32_t)refresh_rate < 120))
result = true;
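For reference, the refresh-rate expression in this hunk is a round-up division carried out entirely in 64-bit integer math (pix_clk_100hz is in units of 100 Hz). A standalone sketch of the same arithmetic, assuming the kernel's div_u64() helper:

    /* Sketch only: refresh = ceil(pix_clk_100hz * 100 / (v_total * h_total)) */
    static unsigned int calc_refresh_rate_hz(uint64_t pix_clk_100hz,
                                             uint32_t v_total, uint32_t h_total)
    {
        uint64_t refresh = pix_clk_100hz * 100 + (uint64_t)v_total * h_total - 1;

        refresh = div_u64(refresh, v_total);   /* divide by total lines */
        refresh = div_u64(refresh, h_total);   /* then by pixels per line */
        return (unsigned int)refresh;
    }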
@@ -746,21 +721,24 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
struct vba_vars_st *vba = &context->bw_ctx.dml.vba;
bool vblank_psr_capable = false;
uint64_t refresh_rate = 0;
+ bool subvp_disallow = false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (resource_is_pipe_type(pipe, OPP_HEAD) &&
resource_is_pipe_type(pipe, DPP_PIPE)) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe_mall_type == SUBVP_MAIN) {
subvp_count++;
+ subvp_disallow |= disallow_subvp_in_active_plus_blank(pipe);
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
}
- if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (pipe_mall_type == SUBVP_NONE) {
non_subvp_pipes++;
vblank_psr_capable = (vblank_psr_capable || dcn32_is_psr_capable(pipe));
if (pipe->stream->ignore_msa_timing_param &&
@@ -772,7 +750,7 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int
}
if (subvp_count == 1 && non_subvp_pipes == 1 && !drr_pipe_found && !vblank_psr_capable &&
- ((uint32_t)refresh_rate < 120) &&
+ ((uint32_t)refresh_rate < 120) && !subvp_disallow &&
vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_vblank_w_mall_sub_vp)
result = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile
index 0a199c83b..c195c47f5 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn321/Makefile
@@ -10,7 +10,7 @@
#
# Makefile for dcn321.
-DCN321 = dcn321_resource.o dcn321_dio_link_encoder.o
+DCN321 = dcn321_dio_link_encoder.o
AMD_DAL_DCN321 = $(addprefix $(AMDDALPATH)/dc/dcn321/,$(DCN321))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
index 20d0eef1a..0e317e0c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/Makefile
@@ -10,9 +10,9 @@
#
# Makefile for DCN35.
-DCN35 = dcn35_resource.o dcn35_init.o dcn35_dio_stream_encoder.o \
- dcn35_dio_link_encoder.o dcn35_dccg.o dcn35_optc.o \
- dcn35_dsc.o dcn35_hubp.o dcn35_hubbub.o \
+DCN35 = dcn35_dio_stream_encoder.o \
+ dcn35_dio_link_encoder.o dcn35_dccg.o \
+ dcn35_hubp.o dcn35_hubbub.o \
dcn35_mmhubbub.o dcn35_opp.o dcn35_dpp.o dcn35_pg_cntl.o dcn35_dwb.o
AMD_DAL_DCN35 = $(addprefix $(AMDDALPATH)/dc/dcn35/,$(DCN35))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
index 479f3683c..f1ba7bb79 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.c
@@ -256,6 +256,21 @@ static void dccg35_set_dtbclk_dto(
if (params->ref_dtbclk_khz && req_dtbclk_khz) {
uint32_t modulo, phase;
+ switch (params->otg_inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 1);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 1);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 1);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 1);
+ break;
+ }
+
// phase / modulo = dtbclk / dtbclk ref
modulo = params->ref_dtbclk_khz * 1000;
phase = req_dtbclk_khz * 1000;
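The modulo/phase values computed above follow directly from the relation in the comment, phase / modulo = dtbclk / dtbclk_ref, with both clocks converted from kHz to Hz. A worked example with illustrative clock values (not taken from any real clock table):

    /* Illustrative numbers only: a 297 MHz DTB clock from a 600 MHz reference. */
    static void example_dtbclk_dto_params(uint32_t *phase, uint32_t *modulo)
    {
        const uint32_t ref_dtbclk_khz = 600000;  /* 600 MHz reference */
        const uint32_t req_dtbclk_khz = 297000;  /* 297 MHz requested */

        *modulo = ref_dtbclk_khz * 1000;  /* 600000000 */
        *phase  = req_dtbclk_khz * 1000;  /* 297000000 */
        /* resulting DTO output = ref * phase / modulo = 297 MHz */
    }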
@@ -280,6 +295,21 @@ static void dccg35_set_dtbclk_dto(
REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst],
PIPE_DTO_SRC_SEL[params->otg_inst], 2);
} else {
+ switch (params->otg_inst) {
+ case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, 0);
+ break;
+ case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, 0);
+ break;
+ case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, 0);
+ break;
+ }
+
REG_UPDATE_2(OTG_PIXEL_RATE_CNTL[params->otg_inst],
DTBCLK_DTO_ENABLE[params->otg_inst], 0,
PIPE_DTO_SRC_SEL[params->otg_inst], params->is_hdmi ? 0 : 1);
@@ -476,6 +506,64 @@ static void dccg35_dpp_root_clock_control(
dccg->dpp_clock_gated[dpp_inst] = !clock_on;
}
+static void dccg35_disable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* set refclk as the source for symclk32_se */
+ switch (hpo_se_inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, 0,
+ SYMCLK32_SE0_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE0_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE0_GATE_DISABLE, 0);
+ }
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, 0,
+ SYMCLK32_SE1_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE1_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE1_GATE_DISABLE, 0);
+ }
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, 0,
+ SYMCLK32_SE2_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE2_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE2_GATE_DISABLE, 0);
+ }
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, 0,
+ SYMCLK32_SE3_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_SE3_GATE_DISABLE, 0);
+// REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+// SYMCLK32_ROOT_SE3_GATE_DISABLE, 0);
+ }
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -484,7 +572,7 @@ void dccg35_init(struct dccg *dccg)
* will cause DCN to hang.
*/
for (otg_inst = 0; otg_inst < 4; otg_inst++)
- dccg31_disable_symclk32_se(dccg, otg_inst);
+ dccg35_disable_symclk32_se(dccg, otg_inst);
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
for (otg_inst = 0; otg_inst < 2; otg_inst++)
@@ -758,7 +846,7 @@ static const struct dccg_funcs dccg35_funcs = {
.dccg_init = dccg35_init,
.set_dpstreamclk = dccg35_set_dpstreamclk,
.enable_symclk32_se = dccg31_enable_symclk32_se,
- .disable_symclk32_se = dccg31_disable_symclk32_se,
+ .disable_symclk32_se = dccg35_disable_symclk32_se,
.enable_symclk32_le = dccg31_enable_symclk32_le,
.disable_symclk32_le = dccg31_disable_symclk32_le,
.set_symclk32_le_root_clock_gating = dccg31_set_symclk32_le_root_clock_gating,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h
index 423feb4c2..1586a45ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dccg.h
@@ -34,6 +34,8 @@
#define DCCG_REG_LIST_DCN35() \
DCCG_REG_LIST_DCN314(),\
SR(DPPCLK_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL4),\
+ SR(DCCG_GATE_DISABLE_CNTL5),\
SR(DCCG_GATE_DISABLE_CNTL6),\
SR(DCCG_GLOBAL_FGCG_REP_CNTL),\
SR(SYMCLKA_CLOCK_ENABLE),\
@@ -174,7 +176,61 @@
DCCG_SF(SYMCLKB_CLOCK_ENABLE, SYMCLKB_FE_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLKC_CLOCK_ENABLE, SYMCLKC_FE_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLKD_CLOCK_ENABLE, SYMCLKD_FE_SRC_SEL, mask_sh),\
- DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh)
+ DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK1_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK2_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DSCCLK3_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK1_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK2_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, DPPCLK3_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, HDMICHARCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL6, HDMISTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKA_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKB_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKC_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYA_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYB_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYC_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYD_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL4, PHYE_REFCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL, DISPCLK_DCCG_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, HDMISTREAMCLK0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
struct dccg *dccg35_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index 81e349d58..da94e5309 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
enc10->base.hpd_source = init_data->hpd_source;
enc10->base.connector = init_data->connector;
+ if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+ enc10->base.features.flags.bits.DP_IS_USB_C = 1;
enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
}
enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
- if (enc10->base.connector.id == CONNECTOR_ID_USBC)
- enc10->base.features.flags.bits.DP_IS_USB_C = 1;
if (bp_funcs->get_connector_speed_cap_info)
result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
index d19db8e9b..53bd0ae4b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.c
@@ -342,13 +342,6 @@ void pg_cntl35_io_clk_pg_control(struct pg_cntl *pg_cntl, bool power_on)
pg_cntl->pg_res_enable[PG_DCIO] = power_on;
}
-void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on)
-{
- struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
-
- REG_UPDATE(DOMAIN22_PG_CONFIG, DOMAIN_POWER_FORCEON, power_on ? 1 : 0);
-}
-
static bool pg_cntl35_plane_otg_status(struct pg_cntl *pg_cntl)
{
struct dcn_pg_cntl *pg_cntl_dcn = TO_DCN_PG_CNTL(pg_cntl);
@@ -518,8 +511,7 @@ static const struct pg_cntl_funcs pg_cntl35_funcs = {
.mpcc_pg_control = pg_cntl35_mpcc_pg_control,
.opp_pg_control = pg_cntl35_opp_pg_control,
.optc_pg_control = pg_cntl35_optc_pg_control,
- .dwb_pg_control = pg_cntl35_dwb_pg_control,
- .set_force_poweron_domain22 = pg_cntl35_set_force_poweron_domain22
+ .dwb_pg_control = pg_cntl35_dwb_pg_control
};
struct pg_cntl *pg_cntl35_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h
index 069dae08e..3de240884 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_pg_cntl.h
@@ -183,7 +183,6 @@ void pg_cntl35_optc_pg_control(struct pg_cntl *pg_cntl,
unsigned int optc_inst, bool power_on);
void pg_cntl35_dwb_pg_control(struct pg_cntl *pg_cntl, bool power_on);
void pg_cntl35_init_pg_status(struct pg_cntl *pg_cntl);
-void pg_cntl35_set_force_poweron_domain22(struct pg_cntl *pg_cntl, bool power_on);
struct pg_cntl *pg_cntl35_create(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 7ce9a5b6c..6d7a15dcf 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -103,10 +103,16 @@ enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
/*
* Sends ALLOCATE_PAYLOAD message.
*/
-bool dm_helpers_dp_mst_send_payload_allocation(
+void dm_helpers_dp_mst_send_payload_allocation(
struct dc_context *ctx,
- const struct dc_stream_state *stream,
- bool enable);
+ const struct dc_stream_state *stream);
+
+/*
+ * Update mst manager relevant variables
+ */
+void dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
+ struct dc_context *ctx,
+ const struct dc_stream_state *stream);
bool dm_helpers_dp_mst_start_top_mgr(
struct dc_context *ctx,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
index 4440d0874..bd7ba0a25 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h
@@ -247,6 +247,7 @@ struct pp_smu_funcs_nv {
#define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4
#define PP_SMU_NUM_DCLK_DPM_LEVELS 8
#define PP_SMU_NUM_VCLK_DPM_LEVELS 8
+#define PP_SMU_NUM_VPECLK_DPM_LEVELS 8
struct dpm_clock {
uint32_t Freq; // In MHz
@@ -262,6 +263,7 @@ struct dpm_clocks {
struct dpm_clock MemClocks[PP_SMU_NUM_MEMCLK_DPM_LEVELS];
struct dpm_clock VClocks[PP_SMU_NUM_VCLK_DPM_LEVELS];
struct dpm_clock DClocks[PP_SMU_NUM_DCLK_DPM_LEVELS];
+ struct dpm_clock VPEClocks[PP_SMU_NUM_VPECLK_DPM_LEVELS];
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
index 50b043435..0c4a8fe8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c
@@ -30,7 +30,7 @@
#include "dcn_calc_auto.h"
#include "dal_asic_id.h"
#include "resource.h"
-#include "dcn10/dcn10_resource.h"
+#include "resource/dcn10/dcn10_resource.h"
#include "dcn10/dcn10_hubbub.h"
#include "dml/dml1_display_rq_dlg_calc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index d2271e308..38ab9ad60 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -33,6 +33,7 @@
#include "link.h"
#include "dcn20_fpu.h"
+#include "dc_state_priv.h"
#define DC_LOGGER \
dc->ctx->logger
@@ -1182,7 +1183,7 @@ void dcn20_calculate_dlg_params(struct dc *dc,
pipes[pipe_idx].pipe.dest.vupdate_width = get_vupdate_width(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[i].unbounded_req = false;
@@ -1532,7 +1533,7 @@ int dcn20_populate_dml_pipes_from_context(struct dc *dc,
*/
if (res_ctx->pipe_ctx[i].plane_state &&
(res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE ||
- res_ctx->pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM))
+ dc_state_get_pipe_subvp_type(context, &res_ctx->pipe_ctx[i]) == SUBVP_PHANTOM))
pipes[pipe_cnt].pipe.src.num_cursors = 0;
else
pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
index 3686f1e7d..63c48c29b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_mode_vba_30.c
@@ -3542,7 +3542,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
{
struct vba_vars_st *v = &mode_lib->vba;
int MinPrefetchMode, MaxPrefetchMode;
- int i;
+ int i, start_state;
unsigned int j, k, m;
bool EnoughWritebackUnits = true;
bool WritebackModeSupport = true;
@@ -3553,6 +3553,11 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*MODE SUPPORT, VOLTAGE STATE AND SOC CONFIGURATION*/
+ if (mode_lib->validate_max_state)
+ start_state = v->soc.num_states - 1;
+ else
+ start_state = 0;
+
CalculateMinAndMaxPrefetchMode(
mode_lib->vba.AllowDRAMSelfRefreshOrDRAMClockChangeInVblank,
&MinPrefetchMode, &MaxPrefetchMode);
@@ -3851,7 +3856,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SingleDPPViewportSizeSupportPerPlane,
&v->ViewportSizeSupport[0][0]);
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDispclk[i], v->DISPCLKDPPCLKVCOSpeed);
v->MaxDppclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown(v->MaxDppclk[i], v->DISPCLKDPPCLKVCOSpeed);
@@ -4007,7 +4012,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*Total Available Pipes Support Check*/
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
if (v->TotalNumberOfActiveDPP[i][j] <= v->MaxNumDPP) {
v->TotalAvailablePipesSupport[i][j] = true;
@@ -4046,7 +4051,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->RequiresDSC[i][k] = false;
v->RequiresFEC[i][k] = false;
@@ -4174,7 +4179,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
v->DIOSupport[i] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (!v->skip_dio_check[k] && v->BlendingAndTiming[k] == k && (v->Output[k] == dm_dp || v->Output[k] == dm_edp || v->Output[k] == dm_hdmi)
@@ -4185,7 +4190,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
}
- for (i = 0; i < v->soc.num_states; ++i) {
+ for (i = start_state; i < v->soc.num_states; ++i) {
v->ODMCombine4To1SupportCheckOK[i] = true;
for (k = 0; k < v->NumberOfActivePlanes; ++k) {
if (v->BlendingAndTiming[k] == k && v->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_4to1
@@ -4197,7 +4202,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/* Skip dscclk validation: as long as dispclk is supported, dscclk is also implicitly supported */
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
v->NotEnoughDSCUnits[i] = false;
v->TotalDSCUnitsRequired = 0.0;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -4217,7 +4222,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*DSC Delay per state*/
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
if (v->OutputBppPerState[i][k] == BPP_INVALID) {
v->BPP = 0.0;
@@ -4333,7 +4338,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->cursor_bw[k] = v->NumberOfCursors[k] * v->CursorWidth[k][0] * v->CursorBPP[k][0] / 8.0 / (v->HTotal[k] / v->PixelClock[k]) * v->VRatio[k];
}
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
v->swath_width_luma_ub_this_state[k] = v->swath_width_luma_ub_all_states[i][j][k];
@@ -5075,7 +5080,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
/*PTE Buffer Size Check*/
- for (i = 0; i < v->soc.num_states; i++) {
+ for (i = start_state; i < v->soc.num_states; i++) {
for (j = 0; j < 2; j++) {
v->PTEBufferSizeNotExceeded[i][j] = true;
for (k = 0; k <= v->NumberOfActivePlanes - 1; k++) {
@@ -5136,7 +5141,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
/*Mode Support, Voltage State and SOC Configuration*/
- for (i = v->soc.num_states - 1; i >= 0; i--) {
+ for (i = v->soc.num_states - 1; i >= start_state; i--) {
for (j = 0; j < 2; j++) {
if (v->ScaleRatioAndTapsSupport == 1 && v->SourceFormatPixelAndScanSupport == 1 && v->ViewportSizeSupport[i][j] == 1
&& v->DIOSupport[i] == 1 && v->ODMCombine4To1SupportCheckOK[i] == 1
@@ -5158,7 +5163,7 @@ void dml30_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
}
{
unsigned int MaximumMPCCombine = 0;
- for (i = v->soc.num_states; i >= 0; i--) {
+ for (i = v->soc.num_states; i >= start_state; i--) {
if (i == v->soc.num_states || v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true) {
v->VoltageLevel = i;
v->ModeIsSupported = v->ModeSupport[i][0] == true || v->ModeSupport[i][1] == true;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index b315ca6f1..ba76dd4a2 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -32,6 +32,7 @@
#include "clk_mgr/dcn32/dcn32_smu13_driver_if.h"
#include "dcn30/dcn30_resource.h"
#include "link.h"
+#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -45,6 +46,14 @@ static const struct subvp_high_refresh_list subvp_high_refresh_list = {
{.width = 1920, .height = 1080, }},
};
+static const struct subvp_active_margin_list subvp_active_margin_list = {
+ .min_refresh = 55,
+ .max_refresh = 65,
+ .res = {
+ {.width = 2560, .height = 1440, },
+ {.width = 1920, .height = 1080, }},
+};
+
struct _vcs_dpi_ip_params_st dcn3_2_ip = {
.gpuvm_enable = 0,
.gpuvm_max_page_table_levels = 4,
@@ -333,7 +342,7 @@ void dcn32_helper_populate_phantom_dlg_params(struct dc *dc,
if (!pipe->stream)
continue;
- if (pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe->plane_state && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
pipes[pipe_idx].pipe.dest.vstartup_start =
get_vstartup(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
pipes[pipe_idx].pipe.dest.vupdate_offset =
@@ -616,7 +625,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
!(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
(!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE &&
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
(refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
!pipe->plane_state->address.tmz_surface &&
(vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0 ||
@@ -674,7 +683,7 @@ static bool dcn32_enough_pipes_for_subvp(struct dc *dc, struct dc_state *context
// Find the minimum pipe split count for non SubVP pipes
if (resource_is_pipe_type(pipe, OPP_HEAD) &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE) {
split_cnt = 0;
while (pipe) {
split_cnt++;
@@ -727,8 +736,8 @@ static bool subvp_subvp_schedulable(struct dc *dc, struct dc_state *context)
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
*/
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- phantom = pipe->stream->mall_stream_config.paired_stream;
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ phantom = dc_state_get_paired_subvp_stream(context, pipe->stream);
microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
phantom->timing.v_addressable;
@@ -796,6 +805,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
int16_t stretched_drr_us = 0;
int16_t drr_stretched_vblank_us = 0;
int16_t max_vblank_mallregion = 0;
+ struct dc_stream_state *phantom_stream;
+ bool subvp_found = false;
+ bool drr_found = false;
// Find SubVP pipe
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -808,8 +820,10 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
continue;
// Find the SubVP pipe
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ subvp_found = true;
break;
+ }
}
// Find the DRR pipe
@@ -817,32 +831,37 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
drr_pipe = &context->res_ctx.pipe_ctx[i];
// We check for master pipe only
- if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
- !resource_is_pipe_type(pipe, DPP_PIPE))
+ if (!resource_is_pipe_type(drr_pipe, OTG_MASTER) ||
+ !resource_is_pipe_type(drr_pipe, DPP_PIPE))
continue;
- if (drr_pipe->stream->mall_stream_config.type == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
- (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed))
+ if (dc_state_get_pipe_subvp_type(context, drr_pipe) == SUBVP_NONE && drr_pipe->stream->ignore_msa_timing_param &&
+ (drr_pipe->stream->allow_freesync || drr_pipe->stream->vrr_active_variable || drr_pipe->stream->vrr_active_fixed)) {
+ drr_found = true;
break;
+ }
}
- main_timing = &pipe->stream->timing;
- phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
- drr_timing = &drr_pipe->stream->timing;
- prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
- (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
- dc->caps.subvp_prefetch_end_to_mall_start_us;
- subvp_active_us = main_timing->v_addressable * main_timing->h_total /
- (double)(main_timing->pix_clk_100hz * 100) * 1000000;
- drr_frame_us = drr_timing->v_total * drr_timing->h_total /
- (double)(drr_timing->pix_clk_100hz * 100) * 1000000;
- // P-State allow width and FW delays already included phantom_timing->v_addressable
- mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
- (double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
- stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
- drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
- (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
- max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
+ if (subvp_found && drr_found) {
+ phantom_stream = dc_state_get_paired_subvp_stream(context, pipe->stream);
+ main_timing = &pipe->stream->timing;
+ phantom_timing = &phantom_stream->timing;
+ drr_timing = &drr_pipe->stream->timing;
+ prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
+ (double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
+ dc->caps.subvp_prefetch_end_to_mall_start_us;
+ subvp_active_us = main_timing->v_addressable * main_timing->h_total /
+ (double)(main_timing->pix_clk_100hz * 100) * 1000000;
+ drr_frame_us = drr_timing->v_total * drr_timing->h_total /
+ (double)(drr_timing->pix_clk_100hz * 100) * 1000000;
+ // P-State allow width and FW delays already included phantom_timing->v_addressable
+ mall_region_us = phantom_timing->v_addressable * phantom_timing->h_total /
+ (double)(phantom_timing->pix_clk_100hz * 100) * 1000000;
+ stretched_drr_us = drr_frame_us + mall_region_us + SUBVP_DRR_MARGIN_US;
+ drr_stretched_vblank_us = (drr_timing->v_total - drr_timing->v_addressable) * drr_timing->h_total /
+ (double)(drr_timing->pix_clk_100hz * 100) * 1000000 + (stretched_drr_us - drr_frame_us);
+ max_vblank_mallregion = drr_stretched_vblank_us > mall_region_us ? drr_stretched_vblank_us : mall_region_us;
+ }
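Each of the timing terms computed above is built from the same quantity: the time to scan some number of lines of a given timing (lines * h_total / pixel_clock), expressed in microseconds. A hypothetical helper, not present in the driver, makes the pattern explicit:

    /* Sketch only: microseconds spent scanning num_lines of a timing. This is
     * the building block behind prefetch_us, subvp_active_us, drr_frame_us and
     * mall_region_us above (pix_clk_100hz is in units of 100 Hz).
     */
    static double lines_to_us(const struct dc_crtc_timing *timing, int num_lines)
    {
        return (double)num_lines * timing->h_total /
               ((double)timing->pix_clk_100hz * 100) * 1000000.0;
    }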
/* We consider SubVP + DRR schedulable if the stretched frame duration of the DRR display (i.e. the
* highest refresh rate + margin that can support UCLK P-State switch) passes the static analysis
@@ -887,6 +906,8 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
struct dc_crtc_timing *vblank_timing = NULL;
+ struct dc_stream_state *phantom_stream;
+ enum mall_stream_type pipe_mall_type;
/* For SubVP + VBLANK/DRR cases, we assume there can only be
* a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
@@ -896,6 +917,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
*/
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
@@ -903,18 +925,19 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
!resource_is_pipe_type(pipe, DPP_PIPE))
continue;
- if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (!found && pipe_mall_type == SUBVP_NONE) {
// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
vblank_index = i;
found = true;
}
- if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
subvp_pipe = pipe;
}
if (found) {
+ phantom_stream = dc_state_get_paired_subvp_stream(context, subvp_pipe->stream);
main_timing = &subvp_pipe->stream->timing;
- phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ phantom_timing = &phantom_stream->timing;
vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
// Also include the prefetch end to mallstart delay time
@@ -969,7 +992,7 @@ static bool subvp_subvp_admissable(struct dc *dc,
continue;
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
@@ -1018,23 +1041,23 @@ static bool subvp_validate_static_schedulability(struct dc *dc,
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
if (!pipe->stream)
continue;
if (pipe->plane_state && !pipe->top_pipe) {
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (pipe_mall_type == SUBVP_MAIN)
subvp_count++;
- if (pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (pipe_mall_type == SUBVP_NONE)
non_subvp_pipes++;
- }
}
// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
// switching (SubVP + VACTIVE unsupported). In situations where we force
// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vlevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] > 0 &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ pipe_mall_type == SUBVP_NONE) {
vactive_count++;
}
pipe_idx++;
@@ -1070,7 +1093,7 @@ static void assign_subvp_index(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
- pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
pipe_ctx->subvp_index = index++;
} else {
pipe_ctx->subvp_index = 0;
@@ -1414,6 +1437,7 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
unsigned int dc_pipe_idx = 0;
int i = 0;
bool found_supported_config = false;
+ int vlevel_temp = 0;
dc_assert_fp_enabled();
@@ -1446,13 +1470,15 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
*/
if (!dc->debug.force_disable_subvp && !dc->caps.dmub_caps.gecc_enable && dcn32_all_pipes_have_stream_and_plane(dc, context) &&
!dcn32_mpo_in_use(context) && !dcn32_any_surfaces_rotated(dc, context) && !is_test_pattern_enabled(context) &&
- (*vlevel == context->bw_ctx.dml.soc.num_states ||
+ (*vlevel == context->bw_ctx.dml.soc.num_states || (vba->DRAMSpeedPerState[*vlevel] != vba->DRAMSpeedPerState[0] &&
+ vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] != dm_dram_clock_change_unsupported) ||
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported ||
dc->debug.force_subvp_mclk_switch)) {
dcn32_merge_pipes_for_subvp(dc, context);
memset(merge, 0, MAX_PIPES * sizeof(bool));
+ vlevel_temp = *vlevel;
/* to re-initialize viewport after the pipe merge */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -1521,10 +1547,14 @@ static void dcn32_full_validate_bw_helper(struct dc *dc,
}
}
+ if (vba->DRAMSpeedPerState[*vlevel] >= vba->DRAMSpeedPerState[vlevel_temp])
+ found_supported_config = false;
+
// If SubVP pipe config is unsupported (or cannot be used for UCLK switching)
// remove phantom pipes and repopulate dml pipes
if (!found_supported_config) {
- dc->res_pool->funcs->remove_phantom_pipes(dc, context, false);
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported;
*pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false);
@@ -1676,7 +1706,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
pipes[pipe_idx].pipe.dest.vready_offset = get_vready_offset(&context->bw_ctx.dml, pipes, pipe_cnt,
pipe_idx);
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[i].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[i].unbounded_req = false;
@@ -1708,7 +1738,7 @@ static void dcn32_calculate_dlg_params(struct dc *dc, struct dc_state *context,
context->res_ctx.pipe_ctx[i].plane_state != context->res_ctx.pipe_ctx[i].top_pipe->plane_state) &&
context->res_ctx.pipe_ctx[i].prev_odm_pipe == NULL) {
/* SS: all active surfaces stored in MALL */
- if (context->res_ctx.pipe_ctx[i].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) != SUBVP_PHANTOM) {
context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[i].surface_size_in_mall_bytes;
if (context->res_ctx.pipe_ctx[i].stream->link->psr_settings.psr_version == DC_PSR_VERSION_UNSUPPORTED) {
@@ -1922,7 +1952,8 @@ bool dcn32_internal_validate_bw(struct dc *dc,
return false;
// For each full update, remove all existing phantom pipes first
- dc->res_pool->funcs->remove_phantom_pipes(dc, context, fast_validate);
+ dc_state_remove_phantom_streams_and_planes(dc, context);
+ dc_state_release_phantom_streams_and_planes(dc, context);
dc->res_pool->funcs->update_soc_for_wm_a(dc, context);
@@ -2729,7 +2760,7 @@ static int build_synthetic_soc_states(bool disable_dc_mode_overwrite, struct clk
struct _vcs_dpi_voltage_scaling_st entry = {0};
struct clk_limit_table_entry max_clk_data = {0};
- unsigned int min_dcfclk_mhz = 199, min_fclk_mhz = 299;
+ unsigned int min_dcfclk_mhz = 399, min_fclk_mhz = 599;
static const unsigned int num_dcfclk_stas = 5;
unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {199, 615, 906, 1324, 1564};
@@ -3305,25 +3336,24 @@ bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe)
{
bool allow = false;
uint32_t refresh_rate = 0;
+ uint32_t min_refresh = subvp_active_margin_list.min_refresh;
+ uint32_t max_refresh = subvp_active_margin_list.max_refresh;
+ uint32_t i;
- /* Allow subvp on displays that have active margin for 2560x1440@60hz displays
- * only for now. There must be no scaling as well.
- *
- * For now we only enable on 2560x1440@60hz displays to enable 4K60 + 1440p60 configs
- * for p-state switching.
- */
- if (pipe->stream && pipe->plane_state) {
- refresh_rate = (pipe->stream->timing.pix_clk_100hz * 100 +
- pipe->stream->timing.v_total * pipe->stream->timing.h_total - 1)
- / (double)(pipe->stream->timing.v_total * pipe->stream->timing.h_total);
- if (pipe->stream->timing.v_addressable == 1440 &&
- pipe->stream->timing.h_addressable == 2560 &&
- refresh_rate >= 55 && refresh_rate <= 65 &&
- pipe->plane_state->src_rect.height == 1440 &&
- pipe->plane_state->src_rect.width == 2560 &&
- pipe->plane_state->dst_rect.height == 1440 &&
- pipe->plane_state->dst_rect.width == 2560)
+ for (i = 0; i < SUBVP_ACTIVE_MARGIN_LIST_LEN; i++) {
+ uint32_t width = subvp_active_margin_list.res[i].width;
+ uint32_t height = subvp_active_margin_list.res[i].height;
+
+ refresh_rate = (pipe->stream->timing.pix_clk_100hz * (uint64_t)100 +
+ pipe->stream->timing.v_total * pipe->stream->timing.h_total - (uint64_t)1);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.v_total);
+ refresh_rate = div_u64(refresh_rate, pipe->stream->timing.h_total);
+
+ if (refresh_rate >= min_refresh && refresh_rate <= max_refresh &&
+ dcn32_check_native_scaling_for_res(pipe, width, height)) {
allow = true;
+ break;
+ }
}
return allow;
}
@@ -3442,7 +3472,15 @@ void dcn32_assign_fpo_vactive_candidate(struct dc *dc, const struct dc_state *co
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
const struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (!pipe->stream)
+ /* In DCN32/321, FPO uses per-pipe P-State force.
+ * If there's no planes, HUBP is power gated and
+ * therefore programming UCLK_PSTATE_FORCE does
+ * nothing (P-State will always be asserted naturally
+ * on a pipe that has HUBP power gated). Therefore we
+ * only want to enable FPO if the FPO pipe has both
+ * a stream and a plane.
+ */
+ if (!pipe->stream || !pipe->plane_state)
continue;
if (vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index f154a3eb1..7ea2bd537 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -164,11 +164,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
},
},
.num_states = 5,
- .sr_exit_time_us = 14.0,
- .sr_enter_plus_exit_time_us = 16.0,
- .sr_exit_z8_time_us = 525.0,
- .sr_enter_plus_exit_z8_time_us = 715.0,
- .fclk_change_latency_us = 20.0,
+ .sr_exit_time_us = 28.0,
+ .sr_enter_plus_exit_time_us = 30.0,
+ .sr_exit_z8_time_us = 210.0,
+ .sr_enter_plus_exit_z8_time_us = 320.0,
+ .fclk_change_latency_us = 24.0,
.usr_retraining_latency_us = 2,
.writeback_latency_us = 12.0,
@@ -326,6 +326,25 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc,
dcn3_5_soc.dram_clock_change_latency_us =
dc->debug.dram_clock_change_latency_ns / 1000.0;
}
+
+ if (dc->bb_overrides.dram_clock_change_latency_ns > 0)
+ dcn3_5_soc.dram_clock_change_latency_us =
+ dc->bb_overrides.dram_clock_change_latency_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_time_ns > 0)
+ dcn3_5_soc.sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_time_ns > 0)
+ dcn3_5_soc.sr_enter_plus_exit_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_exit_z8_time_ns > 0)
+ dcn3_5_soc.sr_exit_z8_time_us = dc->bb_overrides.sr_exit_z8_time_ns / 1000.0;
+
+ if (dc->bb_overrides.sr_enter_plus_exit_z8_time_ns > 0)
+ dcn3_5_soc.sr_enter_plus_exit_z8_time_us =
+ dc->bb_overrides.sr_enter_plus_exit_z8_time_ns / 1000.0;
+
/*temp till dml2 fully work without dml1*/
dml_init_instance(&dc->dml, &dcn3_5_soc, &dcn3_5_ip,
DML_PROJECT_DCN31);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
index 1a2b24cc6..0baf39d64 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_resource_mgmt.c
@@ -772,18 +772,29 @@ static unsigned int get_mpc_factor(struct dml2_context *ctx,
const struct dc_state *state,
const struct dml_display_cfg_st *disp_cfg,
struct dml2_dml_to_dc_pipe_mapping *mapping,
- const struct dc_stream_status *status, unsigned int stream_id,
+ const struct dc_stream_status *status,
+ const struct dc_stream_state *stream,
int plane_idx)
{
unsigned int plane_id;
unsigned int cfg_idx;
+ unsigned int mpc_factor;
- get_plane_id(ctx, state, status->plane_states[plane_idx], stream_id, plane_idx, &plane_id);
+ get_plane_id(ctx, state, status->plane_states[plane_idx],
+ stream->stream_id, plane_idx, &plane_id);
cfg_idx = find_disp_cfg_idx_by_plane_id(mapping, plane_id);
- if (ctx->architecture == dml2_architecture_20)
- return (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
- ASSERT(false);
- return 1;
+ if (ctx->architecture == dml2_architecture_20) {
+ mpc_factor = (unsigned int)disp_cfg->hw.DPPPerSurface[cfg_idx];
+ } else {
+ mpc_factor = 1;
+ ASSERT(false);
+ }
+
+ /* Stereo timings require pipe splitting */
+ if (dml2_is_stereo_timing(stream))
+ mpc_factor = 2;
+
+ return mpc_factor;
}
static unsigned int get_odm_factor(
@@ -820,14 +831,13 @@ static void populate_mpc_factors_for_stream(
unsigned int mpc_factors[MAX_PIPES])
{
const struct dc_stream_status *status = &state->stream_status[stream_idx];
- unsigned int stream_id = state->streams[stream_idx]->stream_id;
int i;
for (i = 0; i < status->plane_count; i++)
if (odm_factor == 1)
mpc_factors[i] = get_mpc_factor(
ctx, state, disp_cfg, mapping, status,
- stream_id, i);
+ state->streams[stream_idx], i);
else
mpc_factors[i] = 1;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
index e85866db8..7ca7f2a74 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_dc_types.h
@@ -38,5 +38,6 @@
#include "core_types.h"
#include "dsc.h"
#include "clk_mgr.h"
+#include "dc_state_priv.h"
#endif //__DML2_DC_TYPES_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
index 32f8a43af..282d70e2b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c
@@ -51,7 +51,7 @@ unsigned int dml2_helper_calculate_num_ways_for_subvp(struct dml2_context *ctx,
// Find the phantom pipes
if (pipe->stream && pipe->plane_state && !pipe->top_pipe && !pipe->prev_odm_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
bytes_per_pixel = pipe->plane_state->format >= SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616 ? 8 : 4;
mblk_width = ctx->config.mall_cfg.mblk_width_pixels;
mblk_height = bytes_per_pixel == 4 ? mblk_width = ctx->config.mall_cfg.mblk_height_4bpe_pixels : ctx->config.mall_cfg.mblk_height_8bpe_pixels;
@@ -253,7 +253,7 @@ static bool assign_subvp_pipe(struct dml2_context *ctx, struct dc_state *context
* to combine this with SubVP can cause issues with the scheduling).
*/
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE && refresh_rate < 120 &&
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_NONE && refresh_rate < 120 &&
vba->ActiveDRAMClockChangeLatencyMarginPerState[vba->VoltageLevel][vba->maxMpcComb][vba->pipe_plane[pipe_idx]] <= 0) {
while (pipe) {
num_pipes++;
@@ -317,7 +317,7 @@ static bool enough_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *st
// Find the minimum pipe split count for non SubVP pipes
if (pipe->stream && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_NONE) {
split_cnt = 0;
while (pipe) {
split_cnt++;
@@ -372,8 +372,8 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
*/
if (pipe->stream && pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
- phantom = pipe->stream->mall_stream_config.paired_stream;
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN) {
+ phantom = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream);
microschedule_lines = (phantom->timing.v_total - phantom->timing.v_front_porch) +
phantom->timing.v_addressable;
@@ -435,6 +435,7 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context
struct pipe_ctx *pipe = NULL;
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
+ struct dc_stream_state *phantom_stream;
int16_t prefetch_us = 0;
int16_t mall_region_us = 0;
int16_t drr_frame_us = 0; // nominal frame time
@@ -453,12 +454,13 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context
continue;
// Find the SubVP pipe
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
break;
}
+ phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream);
main_timing = &pipe->stream->timing;
- phantom_timing = &pipe->stream->mall_stream_config.paired_stream->timing;
+ phantom_timing = &phantom_stream->timing;
prefetch_us = (phantom_timing->v_total - phantom_timing->v_front_porch) * phantom_timing->h_total /
(double)(phantom_timing->pix_clk_100hz * 100) * 1000000 +
ctx->config.svp_pstate.subvp_prefetch_end_to_mall_start_us;
@@ -519,6 +521,8 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
struct dc_crtc_timing *main_timing = NULL;
struct dc_crtc_timing *phantom_timing = NULL;
struct dc_crtc_timing *vblank_timing = NULL;
+ struct dc_stream_state *phantom_stream;
+ enum mall_stream_type pipe_mall_type;
/* For SubVP + VBLANK/DRR cases, we assume there can only be
* a single VBLANK/DRR display. If DML outputs SubVP + VBLANK
@@ -528,19 +532,20 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
*/
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe);
// We check for master pipe, but it shouldn't matter since we only need
// the pipe for timing info (stream should be same for any pipe splits)
if (!pipe->stream || !pipe->plane_state || pipe->top_pipe || pipe->prev_odm_pipe)
continue;
- if (!found && pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ if (!found && pipe_mall_type == SUBVP_NONE) {
// Found pipe which is not SubVP or Phantom (i.e. the VBLANK pipe).
vblank_index = i;
found = true;
}
- if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
subvp_pipe = pipe;
}
// Use ignore_msa_timing_param flag to identify as DRR
@@ -548,8 +553,9 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
// SUBVP + DRR case
schedulable = dml2_svp_drr_schedulable(ctx, context, &context->res_ctx.pipe_ctx[vblank_index].stream->timing);
} else if (found) {
+ phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, subvp_pipe->stream);
main_timing = &subvp_pipe->stream->timing;
- phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ phantom_timing = &phantom_stream->timing;
vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
// Prefetch time is equal to VACTIVE + BP + VSYNC of the phantom pipe
// Also include the prefetch end to mallstart delay time
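For reference, the prefetch window used by these schedulability checks expands to the same expression shown in dml2_svp_drr_schedulable above:

    prefetch_us = (v_total - v_front_porch) * h_total
                  / (pix_clk_100hz * 100) * 1000000
                  + subvp_prefetch_end_to_mall_start_us

i.e. the phantom timing's VACTIVE + back porch + VSYNC region converted to microseconds, plus the configured prefetch-end-to-MALL-start margin.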
@@ -602,19 +608,20 @@ bool dml2_svp_validate_static_schedulability(struct dml2_context *ctx, struct dc
for (i = 0, pipe_idx = 0; i < ctx->config.dcn_pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ enum mall_stream_type pipe_mall_type = ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(context, pipe);
if (!pipe->stream)
continue;
if (pipe->plane_state && !pipe->top_pipe &&
- pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ pipe_mall_type == SUBVP_MAIN)
subvp_count++;
// Count how many planes that aren't SubVP/phantom are capable of VACTIVE
// switching (SubVP + VACTIVE unsupported). In situations where we force
// SubVP for a VACTIVE plane, we don't want to increment the vactive_count.
if (vba->ActiveDRAMClockChangeLatencyMargin[vba->pipe_plane[pipe_idx]] > 0 &&
- pipe->stream->mall_stream_config.type == SUBVP_NONE) {
+ pipe_mall_type == SUBVP_NONE) {
vactive_count++;
}
pipe_idx++;
@@ -708,14 +715,10 @@ static void set_phantom_stream_timing(struct dml2_context *ctx, struct dc_state
static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, struct dc_state *state, unsigned int dc_pipe_idx, unsigned int svp_height, unsigned int vstartup)
{
struct pipe_ctx *ref_pipe = &state->res_ctx.pipe_ctx[dc_pipe_idx];
- struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_stream_for_sink(ref_pipe->stream->sink);
-
- phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
- phantom_stream->dpms_off = true;
- phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
- phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
- ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
- ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+ struct dc_stream_state *phantom_stream = ctx->config.svp_pstate.callbacks.create_phantom_stream(
+ ctx->config.svp_pstate.callbacks.dc,
+ state,
+ ref_pipe->stream);
/* stream has limited viewport and small timing */
memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
@@ -723,7 +726,10 @@ static struct dc_stream_state *enable_phantom_stream(struct dml2_context *ctx, s
memcpy(&phantom_stream->dst, &ref_pipe->stream->dst, sizeof(phantom_stream->dst));
set_phantom_stream_timing(ctx, state, ref_pipe, phantom_stream, dc_pipe_idx, svp_height, vstartup);
- ctx->config.svp_pstate.callbacks.add_stream_to_ctx(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
+ ctx->config.svp_pstate.callbacks.add_phantom_stream(ctx->config.svp_pstate.callbacks.dc,
+ state,
+ phantom_stream,
+ ref_pipe->stream);
return phantom_stream;
}
@@ -740,7 +746,10 @@ static void enable_phantom_plane(struct dml2_context *ctx,
if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state) {
phantom_plane = prev_phantom_plane;
} else {
- phantom_plane = ctx->config.svp_pstate.callbacks.create_plane(ctx->config.svp_pstate.callbacks.dc);
+ phantom_plane = ctx->config.svp_pstate.callbacks.create_phantom_plane(
+ ctx->config.svp_pstate.callbacks.dc,
+ state,
+ curr_pipe->plane_state);
}
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
@@ -763,9 +772,7 @@ static void enable_phantom_plane(struct dml2_context *ctx,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->timing.v_addressable;
- phantom_plane->is_phantom = true;
-
- ctx->config.svp_pstate.callbacks.add_plane_to_context(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state);
+ ctx->config.svp_pstate.callbacks.add_phantom_plane(ctx->config.svp_pstate.callbacks.dc, phantom_stream, phantom_plane, state);
curr_pipe = curr_pipe->bottom_pipe;
prev_phantom_plane = phantom_plane;
@@ -790,7 +797,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_
// We determine which phantom pipes were added by comparing with
// the phantom stream.
if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
pipe->stream->use_dynamic_meta = false;
pipe->plane_state->flip_immediate = false;
if (!ctx->config.svp_pstate.callbacks.build_scaling_params(pipe)) {
@@ -800,7 +807,7 @@ static void add_phantom_pipes_for_main_pipe(struct dml2_context *ctx, struct dc_
}
}
-static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context)
+static bool remove_all_phantom_planes_for_stream(struct dml2_context *ctx, struct dc_stream_state *stream, struct dc_state *context)
{
int i, old_plane_count;
struct dc_stream_status *stream_status = NULL;
@@ -821,9 +828,11 @@ static bool remove_all_planes_for_stream(struct dml2_context *ctx, struct dc_str
for (i = 0; i < old_plane_count; i++)
del_planes[i] = stream_status->plane_states[i];
- for (i = 0; i < old_plane_count; i++)
- if (!ctx->config.svp_pstate.callbacks.remove_plane_from_context(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context))
+ for (i = 0; i < old_plane_count; i++) {
+ if (!ctx->config.svp_pstate.callbacks.remove_phantom_plane(ctx->config.svp_pstate.callbacks.dc, stream, del_planes[i], context))
return false;
+ ctx->config.svp_pstate.callbacks.release_phantom_plane(ctx->config.svp_pstate.callbacks.dc, context, del_planes[i]);
+ }
return true;
}
@@ -832,35 +841,21 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state
{
int i;
bool removed_pipe = false;
- struct dc_plane_state *phantom_plane = NULL;
struct dc_stream_state *phantom_stream = NULL;
for (i = 0; i < ctx->config.dcn_pipe_count; i++) {
struct pipe_ctx *pipe = &state->res_ctx.pipe_ctx[i];
// build scaling params for phantom pipes
- if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- phantom_plane = pipe->plane_state;
+ if (pipe->plane_state && pipe->stream && ctx->config.svp_pstate.callbacks.get_pipe_subvp_type(state, pipe) == SUBVP_PHANTOM) {
phantom_stream = pipe->stream;
- remove_all_planes_for_stream(ctx, pipe->stream, state);
- ctx->config.svp_pstate.callbacks.remove_stream_from_ctx(ctx->config.svp_pstate.callbacks.dc, state, pipe->stream);
-
- /* Ref count is incremented on allocation and also when added to the context.
- * Therefore we must call release for the the phantom plane and stream once
- * they are removed from the ctx to finally decrement the refcount to 0 to free.
- */
- ctx->config.svp_pstate.callbacks.plane_state_release(phantom_plane);
- ctx->config.svp_pstate.callbacks.stream_release(phantom_stream);
+ remove_all_phantom_planes_for_stream(ctx, phantom_stream, state);
+ ctx->config.svp_pstate.callbacks.remove_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
+ ctx->config.svp_pstate.callbacks.release_phantom_stream(ctx->config.svp_pstate.callbacks.dc, state, phantom_stream);
removed_pipe = true;
}
- // Clear all phantom stream info
- if (pipe->stream) {
- pipe->stream->mall_stream_config.type = SUBVP_NONE;
- pipe->stream->mall_stream_config.paired_stream = NULL;
- }
-
if (pipe->plane_state) {
pipe->plane_state->is_phantom = false;
}
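With the hunks above, dml2_mall_phantom.c no longer reads or writes stream->mall_stream_config directly: SubVP classification and phantom pairing go through the svp_pstate callbacks, and phantom stream/plane creation, removal and release are owned by the DC state layer. A minimal sketch of what the stream-level getter behind those callbacks could look like, assuming the per-stream SubVP metadata is tracked in the dc_state's stream_status array (the real layout lives in dc_state_priv.h, which is not part of this hunk):

/* Sketch only: placing mall_stream_config under stream_status is an assumption. */
enum mall_stream_type example_get_stream_subvp_type(const struct dc_state *state,
		const struct dc_stream_state *stream)
{
	int i;

	for (i = 0; i < state->stream_count; i++) {
		if (state->streams[i] == stream)
			return state->stream_status[i].mall_stream_config.type;
	}

	return SUBVP_NONE;
}

The pipe-level variant used throughout the file would classify pipe->stream the same way, treating a pipe without a stream as SUBVP_NONE.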
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index b6744ad77..a20f28a5d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -1055,8 +1055,10 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2,
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
{
- int i = 0, j = 0;
+ int i = 0, j = 0, k = 0;
int disp_cfg_stream_location, disp_cfg_plane_location;
+ enum mall_stream_type stream_mall_type;
+ struct pipe_ctx *current_pipe_context;
for (i = 0; i < __DML2_WRAPPER_MAX_STREAMS_PLANES__; i++) {
dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id_valid[i] = false;
@@ -1076,7 +1078,17 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
dml2_populate_pipe_to_plane_index_mapping(dml2, context);
for (i = 0; i < context->stream_count; i++) {
+ current_pipe_context = NULL;
+ for (k = 0; k < MAX_PIPES; k++) {
+ /* find one pipe allocated to this stream for the purpose of getting
+ info about the link later */
+ if (context->streams[i] == context->res_ctx.pipe_ctx[k].stream) {
+ current_pipe_context = &context->res_ctx.pipe_ctx[k];
+ break;
+ }
+ }
disp_cfg_stream_location = map_stream_to_dml_display_cfg(dml2, context->streams[i], dml_dispcfg);
+ stream_mall_type = dc_state_get_stream_subvp_type(context, context->streams[i]);
if (disp_cfg_stream_location < 0)
disp_cfg_stream_location = dml_dispcfg->num_timings++;
@@ -1084,7 +1096,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], &context->res_ctx.pipe_ctx[i]);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context);
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_stream_location] = dml_odm_use_policy_combine_2to1;
@@ -1121,10 +1133,10 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
populate_dml_surface_cfg_from_plane_state(dml2->v20.dml_core_ctx.project, &dml_dispcfg->surface, disp_cfg_plane_location, context->stream_status[i].plane_states[j]);
populate_dml_plane_cfg_from_plane_state(&dml_dispcfg->plane, disp_cfg_plane_location, context->stream_status[i].plane_states[j], context);
- if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN) {
+ if (stream_mall_type == SUBVP_MAIN) {
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport;
dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_optimize;
- } else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM) {
+ } else if (stream_mall_type == SUBVP_PHANTOM) {
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe;
dml_dispcfg->plane.UseMALLForStaticScreen[disp_cfg_plane_location] = dml_use_mall_static_screen_disable;
dml2->v20.dml_core_ctx.policy.ImmediateFlipRequirement[disp_cfg_plane_location] = dml_immediate_flip_not_required;
@@ -1141,7 +1153,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (j >= 1) {
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], &context->res_ctx.pipe_ctx[i]);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context);
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1;
@@ -1153,9 +1165,9 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
break;
}
- if (context->streams[i]->mall_stream_config.type == SUBVP_MAIN)
+ if (stream_mall_type == SUBVP_MAIN)
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_sub_viewport;
- else if (context->streams[i]->mall_stream_config.type == SUBVP_PHANTOM)
+ else if (stream_mall_type == SUBVP_PHANTOM)
dml_dispcfg->plane.UseMALLForPStateChange[disp_cfg_plane_location] = dml_use_mall_pstate_change_phantom_pipe;
dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[disp_cfg_plane_location] = context->streams[i]->stream_id;
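The loop above now resolves the SubVP type once per stream (stream_mall_type) and applies the same MALL p-state policy in both the with-plane and plane-less paths. Restated as a small helper for clarity; identifier spellings are taken from the hunk, the helper itself is illustrative and not part of the patch:

/* Illustrative restatement of the policy applied above. */
static void example_apply_mall_pstate_policy(struct dml_display_cfg_st *cfg,
		unsigned int plane_loc, enum mall_stream_type stream_mall_type)
{
	if (stream_mall_type == SUBVP_MAIN)
		cfg->plane.UseMALLForPStateChange[plane_loc] = dml_use_mall_pstate_change_sub_viewport;
	else if (stream_mall_type == SUBVP_PHANTOM)
		cfg->plane.UseMALLForPStateChange[plane_loc] = dml_use_mall_pstate_change_phantom_pipe;
	/* otherwise the default chosen during display-cfg init is left untouched */
}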
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index d6a684841..1068b962d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -155,8 +155,12 @@ unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, e
bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
{
+ if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+ return false;
+
/* If this assert is hit then we have a link encoder dynamic management issue */
ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
+
/* Count MST hubs once by treating only 1st remote sink in topology as an encoder */
if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0]) {
return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
@@ -283,6 +287,7 @@ static void populate_pipe_ctx_dlg_params_from_dml(struct pipe_ctx *pipe_ctx, str
void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *context, struct resource_context *out_new_hw_state, struct dml2_context *in_ctx, unsigned int pipe_cnt)
{
unsigned int dc_pipe_ctx_index, dml_pipe_idx, plane_id;
+ enum mall_stream_type pipe_mall_type;
bool unbounded_req_enabled = false;
struct dml2_calculate_rq_and_dlg_params_scratch *s = &in_ctx->v20.scratch.calculate_rq_and_dlg_params_scratch;
@@ -330,7 +335,8 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
*/
populate_pipe_ctx_dlg_params_from_dml(&context->res_ctx.pipe_ctx[dc_pipe_ctx_index], &context->bw_ctx.dml2->v20.dml_core_ctx, dml_pipe_idx);
- if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[dc_pipe_ctx_index]);
+ if (pipe_mall_type == SUBVP_PHANTOM) {
// Phantom pipe requires that DET_SIZE = 0 and no unbounded requests
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].det_buffer_size_kb = 0;
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].unbounded_req = false;
@@ -357,7 +363,7 @@ void dml2_calculate_rq_and_dlg_params(const struct dc *dc, struct dc_state *cont
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].plane_state != context->res_ctx.pipe_ctx[dc_pipe_ctx_index].top_pipe->plane_state) &&
context->res_ctx.pipe_ctx[dc_pipe_ctx_index].prev_odm_pipe == NULL) {
/* SS: all active surfaces stored in MALL */
- if (context->res_ctx.pipe_ctx[dc_pipe_ctx_index].stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe_mall_type != SUBVP_PHANTOM) {
context->bw_ctx.bw.dcn.mall_ss_size_bytes += context->res_ctx.pipe_ctx[dc_pipe_ctx_index].surface_size_in_mall_bytes;
} else {
/* SUBVP: phantom surfaces only stored in MALL */
@@ -476,7 +482,7 @@ bool dml2_verify_det_buffer_configuration(struct dml2_context *in_ctx, struct dc
return need_recalculation;
}
-bool dml2_is_stereo_timing(struct dc_stream_state *stream)
+bool dml2_is_stereo_timing(const struct dc_stream_state *stream)
{
bool is_stereo = false;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
index 23b902833..5842d6d3c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.h
@@ -42,7 +42,7 @@ void dml2_copy_clocks_to_dc_state(struct dml2_dcn_clocks *out_clks, struct dc_st
void dml2_extract_watermark_set(struct dcn_watermarks *watermark, struct display_mode_lib_st *dml_core_ctx);
int dml2_helper_find_dml_pipe_idx_by_stream_id(struct dml2_context *ctx, unsigned int stream_id);
bool is_dtbclk_required(const struct dc *dc, struct dc_state *context);
-bool dml2_is_stereo_timing(struct dc_stream_state *stream);
+bool dml2_is_stereo_timing(const struct dc_stream_state *stream);
/*
* dml2_dc_construct_pipes - This function will determine if we need additional pipes based
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 269bfb14c..72cca3670 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -423,7 +423,7 @@ static int find_drr_eligible_stream(struct dc_state *display_state)
int i;
for (i = 0; i < display_state->stream_count; i++) {
- if (display_state->streams[i]->mall_stream_config.type == SUBVP_NONE
+ if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE
&& display_state->streams[i]->ignore_msa_timing_param) {
// Use ignore_msa_timing_param flag to identify as DRR
return i;
@@ -639,6 +639,8 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.b, &dml2->v20.dml_core_ctx);
memcpy(&context->bw_ctx.bw.dcn.watermarks.c, &dml2->v20.g6_temp_read_watermark_set, sizeof(context->bw_ctx.bw.dcn.watermarks.c));
dml2_extract_watermark_set(&context->bw_ctx.bw.dcn.watermarks.d, &dml2->v20.dml_core_ctx);
+ //copy for deciding zstate use
+ context->bw_ctx.dml.vba.StutterPeriod = context->bw_ctx.dml2->v20.dml_core_ctx.mp.StutterPeriod;
}
return result;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index 548504d7d..cc662d682 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -93,15 +93,34 @@ struct dml2_dc_callbacks {
struct dml2_dc_svp_callbacks {
struct dc *dc;
bool (*build_scaling_params)(struct pipe_ctx *pipe_ctx);
- struct dc_stream_state* (*create_stream_for_sink)(struct dc_sink *dc_sink_data);
- struct dc_plane_state* (*create_plane)(struct dc *dc);
- enum dc_status (*add_stream_to_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
- bool (*add_plane_to_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
- bool (*remove_plane_from_context)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
- enum dc_status (*remove_stream_from_ctx)(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *stream);
- void (*plane_state_release)(struct dc_plane_state *plane_state);
- void (*stream_release)(struct dc_stream_state *stream);
+ struct dc_stream_state* (*create_phantom_stream)(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *main_stream);
+ struct dc_plane_state* (*create_phantom_plane)(struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *main_plane);
+ enum dc_status (*add_phantom_stream)(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *phantom_stream,
+ struct dc_stream_state *main_stream);
+ bool (*add_phantom_plane)(const struct dc *dc, struct dc_stream_state *stream, struct dc_plane_state *plane_state, struct dc_state *context);
+ bool (*remove_phantom_plane)(const struct dc *dc,
+ struct dc_stream_state *stream,
+ struct dc_plane_state *plane_state,
+ struct dc_state *context);
+ enum dc_status (*remove_phantom_stream)(struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
+ void (*release_phantom_plane)(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_plane_state *plane);
+ void (*release_phantom_stream)(const struct dc *dc,
+ struct dc_state *state,
+ struct dc_stream_state *stream);
void (*release_dsc)(struct resource_context *res_ctx, const struct resource_pool *pool, struct display_stream_compressor **dsc);
+ enum mall_stream_type (*get_pipe_subvp_type)(const struct dc_state *state, const struct pipe_ctx *pipe_ctx);
+ enum mall_stream_type (*get_stream_subvp_type)(const struct dc_state *state, const struct dc_stream_state *stream);
+ struct dc_stream_state *(*get_paired_subvp_stream)(const struct dc_state *state, const struct dc_stream_state *stream);
};
struct dml2_clks_table_entry {
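The expanded dml2_dc_svp_callbacks table above is what allows the DML2 wrapper to create, pair and tear down phantom streams and planes without touching dc_stream internals. A wiring sketch for the DC side follows; dc_state_get_pipe_subvp_type(), dc_state_get_stream_subvp_type() and dc_state_get_paired_subvp_stream() appear in other hunks of this series, while the remaining hook targets are assumed counterparts rather than confirmed names:

/* Wiring sketch only; see the note above about which names are assumed. */
static void example_wire_svp_callbacks(struct dml2_dc_svp_callbacks *cb, struct dc *dc)
{
	cb->dc = dc;
	cb->get_pipe_subvp_type = dc_state_get_pipe_subvp_type;
	cb->get_stream_subvp_type = dc_state_get_stream_subvp_type;
	cb->get_paired_subvp_stream = dc_state_get_paired_subvp_stream;
	/* build_scaling_params plus the create/add/remove/release phantom hooks
	 * would point at the matching resource/dc_state helpers (names assumed). */
}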
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index a2537229e..b183ba5a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -1,8 +1,34 @@
# SPDX-License-Identifier: MIT
#
# Makefile for the 'dsc' sub-component of DAL.
+
+ifdef CONFIG_DRM_AMD_DC_FP
+
+###############################################################################
+# DCN20
+###############################################################################
+DSC_DCN20 = dcn20_dsc.o
+
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn20/,$(DSC_DCN20))
+
+
+
+
+###############################################################################
+# DCN35
+###############################################################################
+
+DSC_DCN35 = dcn35_dsc.o
+
+AMD_DISPLAY_FILES += $(addprefix $(AMDDALPATH)/dc/dsc/dcn35/,$(DSC_DCN35))
+
+
+
+endif
+
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC))
AMD_DISPLAY_FILES += $(AMD_DAL_DSC)
+
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index e8b5f17be..0df6c55eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -331,8 +331,9 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
int buff_block_size;
int buff_size;
- if (!dsc_buff_block_size_from_dpcd(dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT],
- &buff_block_size))
+ if (!dsc_buff_block_size_from_dpcd(
+ dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT] & 0x03,
+ &buff_block_size))
return false;
buff_size = dpcd_dsc_basic_data[DP_DSC_RC_BUF_SIZE - DP_DSC_SUPPORT] + 1;
@@ -357,10 +358,15 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc,
{
int dpcd_throughput = dpcd_dsc_basic_data[DP_DSC_PEAK_THROUGHPUT - DP_DSC_SUPPORT];
+ int dsc_throughput_granular_delta;
+
+ dsc_throughput_granular_delta = dpcd_dsc_basic_data[DP_DSC_RC_BUF_BLK_SIZE - DP_DSC_SUPPORT] >> 3;
+ dsc_throughput_granular_delta *= 2;
if (!dsc_throughput_from_dpcd(dpcd_throughput & DP_DSC_THROUGHPUT_MODE_0_MASK,
&dsc_sink_caps->throughput_mode_0_mps))
return false;
+ dsc_sink_caps->throughput_mode_0_mps += dsc_throughput_granular_delta;
dpcd_throughput = (dpcd_throughput & DP_DSC_THROUGHPUT_MODE_1_MASK) >> DP_DSC_THROUGHPUT_MODE_1_SHIFT;
if (!dsc_throughput_from_dpcd(dpcd_throughput, &dsc_sink_caps->throughput_mode_1_mps))
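Per the two hunks above, the DPCD byte read at the DP_DSC_RC_BUF_BLK_SIZE offset is now treated as two fields: the low two bits still select the RC buffer block size, and the value in the upper bits, shifted right by 3 and scaled by 2, is added to the decoded mode-0 peak throughput as a granular correction. A worked sketch of that arithmetic (the 2 MP/s-per-step scaling comes from the hunk; nothing beyond it is assumed about the DPCD layout):

/* Worked sketch of the masking introduced above. */
static void example_split_rc_buf_blk_byte(uint8_t raw, int *buff_block_size_code,
		int *mode0_granular_delta_mps)
{
	*buff_block_size_code = raw & 0x03;		/* low bits: RC buffer block size */
	*mode0_granular_delta_mps = (raw >> 3) * 2;	/* e.g. raw = 0x2A -> 5 * 2 = +10 MP/s */
}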
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
index c9ae2d8f0..c9ae2d8f0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
index ba869387c..ba869387c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
index 71d2dff99..71d2dff99 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h
index 133ad3884..133ad3884 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn35/dcn35_dsc.h
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
index 4b27f29d0..4b27f29d0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
index bccd46bd1..254136f8e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/hwss/Makefile
@@ -78,7 +78,7 @@ ifdef CONFIG_DRM_AMD_DC_FP
# DCN
###############################################################################
-HWSS_DCN10 = dcn10_hwseq.o
+HWSS_DCN10 = dcn10_hwseq.o dcn10_init.o
AMD_DAL_HWSS_DCN10 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn10/,$(HWSS_DCN10))
@@ -86,7 +86,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN10)
###############################################################################
-HWSS_DCN20 = dcn20_hwseq.o
+HWSS_DCN20 = dcn20_hwseq.o dcn20_init.o
AMD_DAL_HWSS_DCN20 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn20/,$(HWSS_DCN20))
@@ -94,7 +94,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN20)
###############################################################################
-HWSS_DCN201 = dcn201_hwseq.o
+HWSS_DCN201 = dcn201_hwseq.o dcn201_init.o
AMD_DAL_HWSS_DCN201 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn201/,$(HWSS_DCN201))
@@ -102,7 +102,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN201)
###############################################################################
-HWSS_DCN21 = dcn21_hwseq.o
+HWSS_DCN21 = dcn21_hwseq.o dcn21_init.o
AMD_DAL_HWSS_DCN21 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn21/,$(HWSS_DCN21))
@@ -114,7 +114,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN21)
###############################################################################
-HWSS_DCN30 = dcn30_hwseq.o
+HWSS_DCN30 = dcn30_hwseq.o dcn30_init.o
AMD_DAL_HWSS_DCN30 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn30/,$(HWSS_DCN30))
@@ -122,7 +122,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN30)
###############################################################################
-HWSS_DCN301 = dcn301_hwseq.o
+HWSS_DCN301 = dcn301_hwseq.o dcn301_init.o
AMD_DAL_HWSS_DCN301 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn301/,$(HWSS_DCN301))
@@ -130,15 +130,17 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN301)
###############################################################################
-HWSS_DCN302 = dcn302_hwseq.o
+HWSS_DCN302 = dcn302_hwseq.o dcn302_init.o
AMD_DAL_HWSS_DCN302 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn302/,$(HWSS_DCN302))
AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN302)
+
+
###############################################################################
-HWSS_DCN303 = dcn303_hwseq.o
+HWSS_DCN303 = dcn303_hwseq.o dcn303_init.o
AMD_DAL_HWSS_DCN303 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn303/,$(HWSS_DCN303))
@@ -146,7 +148,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN303)
###############################################################################
-HWSS_DCN31 = dcn31_hwseq.o
+HWSS_DCN31 = dcn31_hwseq.o dcn31_init.o
AMD_DAL_HWSS_DCN31 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn31/,$(HWSS_DCN31))
@@ -154,7 +156,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN31)
###############################################################################
-HWSS_DCN314 = dcn314_hwseq.o
+HWSS_DCN314 = dcn314_hwseq.o dcn314_init.o
AMD_DAL_HWSS_DCN314 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn314/,$(HWSS_DCN314))
@@ -162,7 +164,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN314)
###############################################################################
-HWSS_DCN32 = dcn32_hwseq.o
+HWSS_DCN32 = dcn32_hwseq.o dcn32_init.o
AMD_DAL_HWSS_DCN32 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn32/,$(HWSS_DCN32))
@@ -170,7 +172,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN32)
###############################################################################
-HWSS_DCN35 = dcn35_hwseq.o
+HWSS_DCN35 = dcn35_hwseq.o dcn35_init.o
AMD_DAL_HWSS_DCN35 = $(addprefix $(AMDDALPATH)/dc/hwss/dcn35/,$(HWSS_DCN35))
@@ -180,4 +182,4 @@ AMD_DISPLAY_FILES += $(AMD_DAL_HWSS_DCN35)
###############################################################################
-endif
\ No newline at end of file
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
index 44b4df646..52f045cfd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce/dce_hwseq.h
@@ -682,6 +682,7 @@ struct dce_hwseq_registers {
uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
uint32_t HPO_TOP_HW_CONTROL;
uint32_t DMU_CLK_CNTL;
+ uint32_t DCCG_GATE_DISABLE_CNTL4;
uint32_t DCCG_GATE_DISABLE_CNTL5;
};
/* set field name */
@@ -1199,7 +1200,19 @@ struct dce_hwseq_registers {
type PHYBSYMCLK_ROOT_GATE_DISABLE;\
type PHYCSYMCLK_ROOT_GATE_DISABLE;\
type PHYDSYMCLK_ROOT_GATE_DISABLE;\
- type PHYESYMCLK_ROOT_GATE_DISABLE;
+ type PHYESYMCLK_ROOT_GATE_DISABLE;\
+ type DTBCLK_P0_GATE_DISABLE;\
+ type DTBCLK_P1_GATE_DISABLE;\
+ type DTBCLK_P2_GATE_DISABLE;\
+ type DTBCLK_P3_GATE_DISABLE;\
+ type DPSTREAMCLK0_GATE_DISABLE;\
+ type DPSTREAMCLK1_GATE_DISABLE;\
+ type DPSTREAMCLK2_GATE_DISABLE;\
+ type DPSTREAMCLK3_GATE_DISABLE;\
+ type DPIASYMCLK0_GATE_DISABLE;\
+ type DPIASYMCLK1_GATE_DISABLE;\
+ type DPIASYMCLK2_GATE_DISABLE;\
+ type DPIASYMCLK3_GATE_DISABLE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 3642c069b..12af6bb9f 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -55,6 +55,7 @@
#include "audio.h"
#include "reg_helper.h"
#include "panel_cntl.h"
+#include "dc_state_priv.h"
#include "dpcd_defs.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
@@ -1476,7 +1477,7 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
-static enum dc_status apply_single_controller_ctx_to_hw(
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
struct dc_state *context,
struct dc *dc)
@@ -1597,7 +1598,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
* is constructed with the same sink). Make sure not to override
* and link programming on the main.
*/
- if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM) {
pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
pipe_ctx->stream->link->replay_settings.replay_feature_enabled = false;
}
@@ -1685,7 +1686,7 @@ static void disable_vga_and_power_gate_all_controllers(
true);
dc->current_state->res_ctx.pipe_ctx[i].pipe_idx = i;
- dc->hwss.disable_plane(dc,
+ dc->hwss.disable_plane(dc, dc->current_state,
&dc->current_state->res_ctx.pipe_ctx[i]);
}
}
@@ -2135,7 +2136,7 @@ static void dce110_reset_hw_ctx_wrap(
old_clk))
old_clk->funcs->cs_power_down(old_clk);
- dc->hwss.disable_plane(dc, pipe_ctx_old);
+ dc->hwss.disable_plane(dc, dc->current_state, pipe_ctx_old);
pipe_ctx_old->stream = NULL;
}
@@ -2302,7 +2303,7 @@ enum dc_status dce110_apply_ctx_to_hw(
if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
continue;
- status = apply_single_controller_ctx_to_hw(
+ status = dce110_apply_single_controller_ctx_to_hw(
pipe_ctx,
context,
dc);
@@ -2499,6 +2500,7 @@ static bool wait_for_reset_trigger_to_occur(
/* Enable timing synchronization for a group of Timing Generators. */
static void dce110_enable_timing_synchronization(
struct dc *dc,
+ struct dc_state *state,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[])
@@ -2592,6 +2594,7 @@ static void init_hw(struct dc *dc)
struct dmcu *dmcu;
struct dce_hwseq *hws = dc->hwseq;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
bp = dc->ctx->dc_bios;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -2641,13 +2644,15 @@ static void init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
abm = dc->res_pool->abm;
if (abm != NULL)
- abm->funcs->abm_init(abm, backlight);
+ abm->funcs->abm_init(abm, backlight, user_level);
dmcu = dc->res_pool->dmcu;
if (dmcu != NULL && abm != NULL)
@@ -2844,7 +2849,7 @@ static void dce110_post_unlock_program_front_end(
{
}
-static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
+static void dce110_power_down_fe(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
int fe_idx = pipe_ctx->plane_res.mi ?
@@ -3117,7 +3122,8 @@ void dce110_disable_link_output(struct dc_link *link,
struct dmcu *dmcu = dc->res_pool->dmcu;
if (signal == SIGNAL_TYPE_EDP &&
- link->dc->hwss.edp_backlight_control)
+ link->dc->hwss.edp_backlight_control &&
+ !link->skip_implict_edp_power_control)
link->dc->hwss.edp_backlight_control(link, false);
else if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
index 08028a177..ed3cc3648 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h
@@ -39,6 +39,10 @@ enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context);
+enum dc_status dce110_apply_single_controller_ctx_to_hw(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
void dce110_enable_stream(struct pipe_ctx *pipe_ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 2b3ef5cdb..c45f84aa3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -56,6 +56,7 @@
#include "dc_trace.h"
#include "dce/dmub_outbox.h"
#include "link.h"
+#include "dc_state_priv.h"
#define DC_LOGGER \
dc_logger
@@ -115,7 +116,7 @@ void dcn10_lock_all_pipes(struct dc *dc,
!pipe_ctx->stream ||
(!pipe_ctx->plane_state && !old_pipe_ctx->plane_state) ||
!tg->funcs->is_tg_enabled(tg) ||
- pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM)
continue;
if (lock)
@@ -1181,7 +1182,9 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
}
/* trigger HW to start disconnect plane from stream on the next vsync */
-void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_plane_atomic_disconnect(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
@@ -1201,7 +1204,7 @@ void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
// Phantom pipes have OTG disabled by default, so MPCC_STATUS will never assert idle,
// so don't wait for MPCC_IDLE in the programming sequence
- if (opp != NULL && !pipe_ctx->plane_state->is_phantom)
+ if (opp != NULL && dc_state_get_pipe_subvp_type(state, pipe_ctx) != SUBVP_PHANTOM)
opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
dc->optimized_required = true;
@@ -1291,7 +1294,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state = NULL;
}
-void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -1417,12 +1420,12 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
@@ -1487,6 +1490,7 @@ void dcn10_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
bool is_optimized_init_done = false;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -1584,12 +1588,14 @@ void dcn10_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
if (abm != NULL)
- abm->funcs->abm_init(abm, backlight);
+ abm->funcs->abm_init(abm, backlight, user_level);
if (dmcu != NULL && !dmcu->auto_load_dmcu)
dmcu->funcs->dmcu_init(dmcu);
@@ -2266,6 +2272,7 @@ void dcn10_enable_vblanks_synchronization(
void dcn10_enable_timing_synchronization(
struct dc *dc,
+ struct dc_state *state,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[])
@@ -2280,7 +2287,7 @@ void dcn10_enable_timing_synchronization(
DC_SYNC_INFO("Setting up OTG reset trigger\n");
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
opp = grouped_pipes[i]->stream_res.opp;
@@ -2300,14 +2307,14 @@ void dcn10_enable_timing_synchronization(
if (grouped_pipes[i]->stream == NULL)
continue;
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream->vblank_synchronized = false;
}
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
@@ -2321,11 +2328,11 @@ void dcn10_enable_timing_synchronization(
* synchronized. Look at last pipe programmed to reset.
*/
- if (grouped_pipes[1]->stream && grouped_pipes[1]->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ if (grouped_pipes[1]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[1]) != SUBVP_PHANTOM)
wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (grouped_pipes[i]->stream && dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
@@ -2333,7 +2340,7 @@ void dcn10_enable_timing_synchronization(
}
for (i = 1; i < group_size; i++) {
- if (grouped_pipes[i]->stream && grouped_pipes[i]->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (dc_state_get_pipe_subvp_type(state, grouped_pipes[i]) == SUBVP_PHANTOM)
continue;
opp = grouped_pipes[i]->stream_res.opp;
@@ -3025,7 +3032,7 @@ void dcn10_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
@@ -3072,7 +3079,7 @@ void dcn10_prepare_bandwidth(
context,
false);
- dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
index ef6d56da4..bc5dd68a2 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.h
@@ -75,7 +75,7 @@ void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
-void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn10_lock_all_pipes(
struct dc *dc,
struct dc_state *context,
@@ -108,13 +108,16 @@ void dcn10_power_down_on_boot(struct dc *dc);
enum dc_status dce110_apply_ctx_to_hw(
struct dc *dc,
struct dc_state *context);
-void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_plane_atomic_disconnect(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx);
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data);
void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx);
void dce110_power_down(struct dc *dc);
void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context);
void dcn10_enable_timing_synchronization(
struct dc *dc,
+ struct dc_state *state,
int group_index,
int group_size,
struct pipe_ctx *grouped_pipes[]);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
index a5bdac79a..a5bdac79a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h
index 8c6fd7b84..8c6fd7b84 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index e3f547e06..868a086c7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -55,6 +55,7 @@
#include "inc/link_enc_cfg.h"
#include "link_hwss.h"
#include "link.h"
+#include "dc_state_priv.h"
#define DC_LOGGER \
dc_logger
@@ -623,9 +624,9 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
}
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
- bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -847,7 +848,7 @@ enum dc_status dcn20_enable_stream_timing(
/* TODO enable stream if timing changed */
/* TODO unblank stream if DP */
- if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_PHANTOM) {
if (pipe_ctx->stream_res.tg && pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable)
pipe_ctx->stream_res.tg->funcs->phantom_crtc_post_enable(pipe_ctx->stream_res.tg);
}
@@ -1368,8 +1369,14 @@ void dcn20_pipe_control_lock(
}
}
-static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe)
+static void dcn20_detect_pipe_changes(struct dc_state *old_state,
+ struct dc_state *new_state,
+ struct pipe_ctx *old_pipe,
+ struct pipe_ctx *new_pipe)
{
+ bool old_is_phantom = dc_state_get_pipe_subvp_type(old_state, old_pipe) == SUBVP_PHANTOM;
+ bool new_is_phantom = dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM;
+
new_pipe->update_flags.raw = 0;
/* If non-phantom pipe is being transitioned to a phantom pipe,
@@ -1379,8 +1386,8 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
* be different). The post_unlock sequence will set the correct
* update flags to enable the phantom pipe.
*/
- if (old_pipe->plane_state && !old_pipe->plane_state->is_phantom &&
- new_pipe->plane_state && new_pipe->plane_state->is_phantom) {
+ if (old_pipe->plane_state && !old_is_phantom &&
+ new_pipe->plane_state && new_is_phantom) {
new_pipe->update_flags.bits.disable = 1;
return;
}
@@ -1405,6 +1412,10 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
new_pipe->update_flags.bits.scaler = 1;
new_pipe->update_flags.bits.viewport = 1;
new_pipe->update_flags.bits.det_size = 1;
+ if (new_pipe->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE &&
+ new_pipe->stream_res.test_pattern_params.width != 0 &&
+ new_pipe->stream_res.test_pattern_params.height != 0)
+ new_pipe->update_flags.bits.test_pattern_changed = 1;
if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) {
new_pipe->update_flags.bits.odm = 1;
new_pipe->update_flags.bits.global_sync = 1;
@@ -1417,14 +1428,14 @@ static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx
* The remove-add sequence of the phantom pipe always results in the pipe
* being blanked in enable_stream_timing (DPG).
*/
- if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM)
+ if (new_pipe->stream && dc_state_get_pipe_subvp_type(new_state, new_pipe) == SUBVP_PHANTOM)
new_pipe->update_flags.bits.enable = 1;
/* Phantom pipes are effectively disabled, if the pipe was previously phantom
* we have to enable
*/
- if (old_pipe->plane_state && old_pipe->plane_state->is_phantom &&
- new_pipe->plane_state && !new_pipe->plane_state->is_phantom)
+ if (old_pipe->plane_state && old_is_phantom &&
+ new_pipe->plane_state && !new_is_phantom)
new_pipe->update_flags.bits.enable = 1;
if (old_pipe->plane_state && !new_pipe->plane_state) {
@@ -1557,6 +1568,7 @@ static void dcn20_update_dchubp_dpp(
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct dccg *dccg = dc->res_pool->dccg;
bool viewport_changed = false;
+ enum mall_stream_type pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe_ctx);
if (pipe_ctx->update_flags.bits.dppclk)
dpp->funcs->dpp_dppclk_control(dpp, false, true);
@@ -1702,7 +1714,7 @@ static void dcn20_update_dchubp_dpp(
pipe_ctx->update_flags.bits.plane_changed ||
plane_state->update_flags.bits.addr_update) {
if (resource_is_pipe_type(pipe_ctx, OTG_MASTER) &&
- pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ pipe_mall_type == SUBVP_MAIN) {
union block_sequence_params params;
params.subvp_save_surf_addr.dc_dmub_srv = dc->ctx->dmub_srv;
@@ -1716,7 +1728,7 @@ static void dcn20_update_dchubp_dpp(
if (pipe_ctx->update_flags.bits.enable)
hubp->funcs->set_blank(hubp, false);
/* If the stream paired with this plane is phantom, the plane is also phantom */
- if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.type == SUBVP_PHANTOM
+ if (pipe_ctx->stream && pipe_mall_type == SUBVP_PHANTOM
&& hubp->funcs->phantom_hubp_post_enable)
hubp->funcs->phantom_hubp_post_enable(hubp);
}
@@ -1774,7 +1786,7 @@ static void dcn20_program_pipe(
pipe_ctx->pipe_dlg_param.vupdate_offset,
pipe_ctx->pipe_dlg_param.vupdate_width);
- if (pipe_ctx->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
pipe_ctx->stream_res.tg->funcs->set_vtg_params(
@@ -1914,7 +1926,7 @@ void dcn20_program_front_end_for_ctx(
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
- dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
+ dcn20_detect_pipe_changes(dc->current_state, context, &dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
/* When disabling phantom pipes, turn on phantom OTG first (so we can get double
@@ -1924,15 +1936,16 @@ void dcn20_program_front_end_for_ctx(
struct dc_stream_state *stream = dc->current_state->res_ctx.pipe_ctx[i].stream;
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable && stream &&
- dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM) {
struct timing_generator *tg = dc->current_state->res_ctx.pipe_ctx[i].stream_res.tg;
if (tg->funcs->enable_crtc) {
if (dc->hwss.blank_phantom) {
int main_pipe_width, main_pipe_height;
+ struct dc_stream_state *phantom_stream = dc_state_get_paired_subvp_stream(dc->current_state, dc->current_state->res_ctx.pipe_ctx[i].stream);
- main_pipe_width = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.width;
- main_pipe_height = dc->current_state->res_ctx.pipe_ctx[i].stream->mall_stream_config.paired_stream->dst.height;
+ main_pipe_width = phantom_stream->dst.width;
+ main_pipe_height = phantom_stream->dst.height;
dc->hwss.blank_phantom(dc, tg, main_pipe_width, main_pipe_height);
}
tg->funcs->enable_crtc(tg);
@@ -1961,9 +1974,9 @@ void dcn20_program_front_end_for_ctx(
* DET allocation.
*/
if (hubbub->funcs->program_det_size && (context->res_ctx.pipe_ctx[i].update_flags.bits.disable ||
- (context->res_ctx.pipe_ctx[i].plane_state && context->res_ctx.pipe_ctx[i].plane_state->is_phantom)))
+ (context->res_ctx.pipe_ctx[i].plane_state && dc_state_get_pipe_subvp_type(context, &context->res_ctx.pipe_ctx[i]) == SUBVP_PHANTOM)))
hubbub->funcs->program_det_size(hubbub, dc->current_state->res_ctx.pipe_ctx[i].plane_res.hubp->inst, 0);
- hws->funcs.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ hws->funcs.plane_atomic_disconnect(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
}
@@ -1996,7 +2009,7 @@ void dcn20_program_front_end_for_ctx(
* but the MPO still exists until the double buffered update of the main pipe so we
* will get a frame of underflow if the phantom pipe is programmed here.
*/
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM)
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM)
dcn20_program_pipe(dc, pipe, context);
}
@@ -2035,7 +2048,7 @@ void dcn20_post_unlock_program_front_end(
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
- dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
+ dc->hwss.disable_plane(dc, dc->current_state, &dc->current_state->res_ctx.pipe_ctx[i]);
/*
* If we are enabling a pipe, we need to wait for pending clear as this is a critical
@@ -2047,7 +2060,7 @@ void dcn20_post_unlock_program_front_end(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// Don't check flip pending on phantom pipes
if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable &&
- pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_PHANTOM) {
struct hubp *hubp = pipe->plane_res.hubp;
int j = 0;
for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_US / polling_interval_us
@@ -2070,7 +2083,7 @@ void dcn20_post_unlock_program_front_end(
* programming sequence).
*/
while (pipe) {
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
/* When turning on the phantom pipe we want to run through the
* entire enable sequence, so apply all the "enable" flags.
*/
@@ -2140,17 +2153,17 @@ void dcn20_prepare_bandwidth(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// At optimize don't restore the original watermark value
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) {
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
break;
}
}
/* program dchubbub watermarks:
- * For assigning wm_optimized_required, use |= operator since we don't want
+ * For assigning optimized_required, use |= operator since we don't want
* to clear the value if the optimize has not happened yet
*/
- dc->wm_optimized_required |= hubbub->funcs->program_watermarks(hubbub,
+ dc->optimized_required |= hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
@@ -2163,10 +2176,10 @@ void dcn20_prepare_bandwidth(
if (hubbub->funcs->program_compbuf_size) {
if (context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes) {
compbuf_size_kb = context->bw_ctx.dml.ip.min_comp_buffer_size_kbytes;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.dml.ip.min_comp_buffer_size_kbytes);
} else {
compbuf_size_kb = context->bw_ctx.bw.dcn.compbuf_size_kb;
- dc->wm_optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
+ dc->optimized_required |= (compbuf_size_kb != dc->current_state->bw_ctx.bw.dcn.compbuf_size_kb);
}
hubbub->funcs->program_compbuf_size(hubbub, compbuf_size_kb, false);
@@ -2184,7 +2197,7 @@ void dcn20_optimize_bandwidth(
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
// At optimize don't need to restore the original watermark value
- if (pipe->stream && pipe->stream->mall_stream_config.type != SUBVP_NONE) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) != SUBVP_NONE) {
context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = 4U * 1000U * 1000U * 1000U;
break;
}
@@ -2218,7 +2231,8 @@ void dcn20_optimize_bandwidth(
dc->clk_mgr,
context,
true);
- if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW) {
+ if (context->bw_ctx.bw.dcn.clk.zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
+ !dc->debug.disable_extblankadj) {
for (i = 0; i < dc->res_pool->pipe_count; ++i) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
@@ -2331,7 +2345,7 @@ bool dcn20_wait_for_blank_complete(
int counter;
for (counter = 0; counter < 1000; counter++) {
- if (opp->funcs->dpg_is_blanked(opp))
+ if (!opp->funcs->dpg_is_pending(opp))
break;
udelay(100);
@@ -2342,7 +2356,7 @@ bool dcn20_wait_for_blank_complete(
return false;
}
- return true;
+ return opp->funcs->dpg_is_blanked(opp);
}
bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
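The two hunks above change dcn20_wait_for_blank_complete() from returning true as soon as it stops polling to waiting for the DPG double-buffered update to stop being pending and only then reporting the actual blank state. A minimal sketch of the resulting flow, using the dpg_is_pending()/dpg_is_blanked() hooks referenced in the hunk (simplified, error logging omitted):

static bool wait_for_blank_complete_sketch(struct output_pixel_processor *opp)
{
	int counter;

	/* Wait for any queued DPG update to take effect. */
	for (counter = 0; counter < 1000; counter++) {
		if (!opp->funcs->dpg_is_pending(opp))
			break;
		udelay(100);
	}

	if (counter == 1000)
		return false;	/* timed out with an update still pending */

	/* Only now is the blank status meaningful. */
	return opp->funcs->dpg_is_blanked(opp);
}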
@@ -2548,7 +2562,7 @@ void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
tg->funcs->setup_vertical_interrupt2(tg, start_line);
}
-static void dcn20_reset_back_end_for_pipe(
+void dcn20_reset_back_end_for_pipe(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
struct dc_state *context)
@@ -2944,7 +2958,7 @@ void dcn20_fpga_init_hw(struct dc *dc)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
/*to do*/
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
}
/* initialize DWB pointer to MCIF_WB */
@@ -2961,7 +2975,7 @@ void dcn20_fpga_init_hw(struct dc *dc)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
index ab02e4e9c..d950b3e54 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.h
@@ -52,7 +52,7 @@ void dcn20_program_output_csc(struct dc *dc,
void dcn20_enable_stream(struct pipe_ctx *pipe_ctx);
void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
-void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn20_disable_pixel_data(
struct dc *dc,
struct pipe_ctx *pipe_ctx,
@@ -84,6 +84,10 @@ enum dc_status dcn20_enable_stream_timing(
void dcn20_disable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_enable_stream_gating(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn20_reset_back_end_for_pipe(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
void dcn20_init_blank(
struct dc *dc,
struct timing_generator *tg);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
index 884e3e323..884e3e323 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h
index 12277797c..12277797c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
index d3fe6092f..d5769f388 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.c
@@ -320,7 +320,7 @@ void dcn201_init_hw(struct dc *dc)
res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = res_pool->opps[i];
/*To do: number of MPCC != number of opp*/
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
}
/* initialize DWB pointer to MCIF_WB */
@@ -337,7 +337,7 @@ void dcn201_init_hw(struct dc *dc)
for (i = 0; i < res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
@@ -369,7 +369,9 @@ void dcn201_init_hw(struct dc *dc)
}
/* trigger HW to start disconnect plane from stream on the next vsync */
-void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn201_plane_atomic_disconnect(struct dc *dc,
+ struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h
index 26cd62be6..6a50a9894 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_hwseq.h
@@ -33,7 +33,7 @@ void dcn201_init_hw(struct dc *dc);
void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings);
void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
-void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn201_plane_atomic_disconnect(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
void dcn201_pipe_control_lock(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
index a13bf6c93..a13bf6c93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h
index 1168887b0..1168887b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn201/dcn201_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
index 5c7f380a8..7252f5f78 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
@@ -211,7 +211,7 @@ void dcn21_set_pipe(struct pipe_ctx *pipe_ctx)
struct dmcu *dmcu = pipe_ctx->stream->ctx->dc->res_pool->dmcu;
uint32_t otg_inst;
- if (!abm && !tg && !panel_cntl)
+ if (!abm || !tg || !panel_cntl)
return;
otg_inst = tg->inst;
@@ -245,7 +245,7 @@ bool dcn21_set_backlight_level(struct pipe_ctx *pipe_ctx,
struct panel_cntl *panel_cntl = pipe_ctx->stream->link->panel_cntl;
uint32_t otg_inst;
- if (!abm && !tg && !panel_cntl)
+ if (!abm || !tg || !panel_cntl)
return false;
otg_inst = tg->inst;
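
The two dcn21 hunks fix an inverted guard: with the old "&&" form the early return fired only when abm, tg and panel_cntl were all NULL, so a single missing object could still be dereferenced below. A small sketch of the corrected check, with opaque pointers standing in for the real objects:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Bail out when ANY required object is missing, since each of them is
     * dereferenced afterwards. */
    static bool set_pipe_guarded(void *abm, void *tg, void *panel_cntl)
    {
        if (!abm || !tg || !panel_cntl)
            return false;
        /* safe to use abm, tg and panel_cntl from here on */
        return true;
    }

    int main(void)
    {
        int dummy;

        printf("all present: %d\n", set_pipe_guarded(&dummy, &dummy, &dummy));
        printf("tg missing : %d\n", set_pipe_guarded(&dummy, NULL, &dummy));
        return 0;
    }
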
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
index 18249c6b6..18249c6b6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h
index 3ed242926..3ed242926 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index c89149d15..55cf4c9e6 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -51,7 +51,7 @@
#include "dcn20/dcn20_hwseq.h"
#include "dcn30/dcn30_resource.h"
#include "link.h"
-
+#include "dc_state_priv.h"
@@ -367,6 +367,10 @@ void dcn30_enable_writeback(
DC_LOG_DWB("%s dwb_pipe_inst = %d, mpcc_inst = %d",\
__func__, wb_info->dwb_pipe_inst,\
wb_info->mpcc_inst);
+
+ /* Warmup interface */
+ dcn30_mmhubbub_warmup(dc, 1, wb_info);
+
/* Update writeback pipe */
dcn30_set_writeback(dc, wb_info, context);
@@ -472,6 +476,7 @@ void dcn30_init_hw(struct dc *dc)
int i;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -608,13 +613,15 @@ void dcn30_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
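
abm_init now takes the user-requested level alongside the measured hardware backlight; the same pattern is repeated for DCN3.1, DCN3.2 and DCN3.5 further down. A standalone sketch of the handoff, where fake_panel_cntl and its stored user level are simplified stand-ins for panel_cntl and stored_backlight_registers.USER_LEVEL:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BACKLIGHT_LEVEL 0xFFFF

    struct fake_panel_cntl {
        uint32_t current_hw_backlight;
        uint32_t stored_user_level;
    };

    static uint32_t panel_hw_init(const struct fake_panel_cntl *p)
    {
        return p->current_hw_backlight;   /* value read back from hardware */
    }

    static void abm_init(uint32_t backlight, uint32_t user_level)
    {
        printf("ABM init: hw=%u user=%u\n", backlight, user_level);
    }

    int main(void)
    {
        uint32_t backlight = MAX_BACKLIGHT_LEVEL;
        uint32_t user_level = MAX_BACKLIGHT_LEVEL;
        struct fake_panel_cntl panel = { 0x8000, 0xA000 };

        backlight = panel_hw_init(&panel);
        user_level = panel.stored_user_level;

        abm_init(backlight, user_level);
        return 0;
    }
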
@@ -972,7 +979,7 @@ void dcn30_hardware_release(struct dc *dc)
if (!pipe->stream)
continue;
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe) == SUBVP_MAIN) {
subvp_in_use = true;
break;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
index 9894caedf..9894caedf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h
index c280ff90b..c280ff90b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
index 6477009ce..6477009ce 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
index 0bca48ccb..0bca48ccb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn301/dcn301_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c
index 637f9514d..637f9514d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h
index 899587b93..899587b93 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn302/dcn302_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c
index edb4d68b8..edb4d68b8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h
index 494998112..494998112 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn303/dcn303_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index 3a70a3cbc..7423880fa 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -96,7 +96,8 @@ static void enable_memory_low_power(struct dc *dc)
if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
// Power down VPGs
for (i = 0; i < dc->res_pool->stream_enc_count; i++)
- dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
+ if (dc->res_pool->stream_enc[i]->vpg)
+ dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
#if defined(CONFIG_DRM_AMD_DC_FP)
for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
@@ -112,6 +113,7 @@ void dcn31_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
int i;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -223,13 +225,15 @@ void dcn31_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
index 669f524bd..669f524bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h
index a3db08c8b..a3db08c8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
index ccb7e317e..ccb7e317e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h
index 8f92e6657..8f92e6657 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index 580afb008..766822943 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -51,6 +51,7 @@
#include "dcn32/dcn32_resource.h"
#include "link.h"
#include "../dcn20/dcn20_hwseq.h"
+#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger)
@@ -348,8 +349,7 @@ void dcn32_commit_subvp_config(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- if (pipe_ctx->stream && pipe_ctx->stream->mall_stream_config.paired_stream &&
- pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) == SUBVP_MAIN) {
// There is at least 1 SubVP pipe, so enable SubVP
enable_subvp = true;
break;
@@ -375,18 +375,20 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
bool subvp_immediate_flip = false;
bool subvp_in_use = false;
struct pipe_ctx *pipe;
+ enum mall_stream_type pipe_mall_type = SUBVP_NONE;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
+ pipe_mall_type = dc_state_get_pipe_subvp_type(context, pipe);
- if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN) {
subvp_in_use = true;
break;
}
}
if (top_pipe_to_program && top_pipe_to_program->stream && top_pipe_to_program->plane_state) {
- if (top_pipe_to_program->stream->mall_stream_config.type == SUBVP_MAIN &&
+ if (dc_state_get_pipe_subvp_type(context, top_pipe_to_program) == SUBVP_MAIN &&
top_pipe_to_program->plane_state->flip_immediate)
subvp_immediate_flip = true;
}
@@ -398,7 +400,7 @@ void dcn32_subvp_pipe_control_lock(struct dc *dc,
if (!lock) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->plane_state && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
+ if (pipe->stream && pipe->plane_state && pipe_mall_type == SUBVP_MAIN &&
should_lock_all_pipes)
pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg, CRTC_STATE_VBLANK);
}
@@ -416,14 +418,7 @@ void dcn32_subvp_pipe_control_lock_fast(union block_sequence_params *params)
{
struct dc *dc = params->subvp_pipe_control_lock_fast_params.dc;
bool lock = params->subvp_pipe_control_lock_fast_params.lock;
- struct pipe_ctx *pipe_ctx = params->subvp_pipe_control_lock_fast_params.pipe_ctx;
- bool subvp_immediate_flip = false;
-
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->plane_state) {
- if (pipe_ctx->stream->mall_stream_config.type == SUBVP_MAIN &&
- pipe_ctx->plane_state->flip_immediate)
- subvp_immediate_flip = true;
- }
+ bool subvp_immediate_flip = params->subvp_pipe_control_lock_fast_params.subvp_immediate_flip;
// Don't need to lock for DRR VSYNC flips -- FW will wait for DRR pending update cleared.
if (subvp_immediate_flip) {
@@ -609,7 +604,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = pipe->plane_res.hubp;
- if (!pipe->stream || !(pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+ if (!pipe->stream || !(dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN ||
pipe->stream->fpo_in_use)) {
if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
hubp->funcs->hubp_update_force_pstate_disallow(hubp, false);
@@ -624,7 +619,7 @@ void dcn32_update_force_pstate(struct dc *dc, struct dc_state *context)
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
struct hubp *hubp = pipe->plane_res.hubp;
- if (pipe->stream && (pipe->stream->mall_stream_config.type == SUBVP_MAIN ||
+ if (pipe->stream && (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN ||
pipe->stream->fpo_in_use)) {
if (hubp && hubp->funcs->hubp_update_force_pstate_disallow)
hubp->funcs->hubp_update_force_pstate_disallow(hubp, true);
@@ -671,8 +666,8 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context)
if (cursor_size > 16384)
cache_cursor = true;
- if (pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
+ hubp->funcs->hubp_update_mall_sel(hubp, 1, false);
} else {
// MALL not supported with Stereo3D
hubp->funcs->hubp_update_mall_sel(hubp,
@@ -714,9 +709,8 @@ void dcn32_program_mall_pipe_config(struct dc *dc, struct dc_state *context)
* see if CURSOR_REQ_MODE will be back to 1 for SubVP
* when it should be 0 for MPO
*/
- if (pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
+ if (dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN)
hubp->funcs->hubp_prepare_subvp_buffering(hubp, true);
- }
}
}
}
@@ -759,6 +753,7 @@ void dcn32_init_hw(struct dc *dc)
int i;
int edp_num;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
@@ -913,13 +908,15 @@ void dcn32_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL && abms[i]->funcs != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
@@ -1194,7 +1191,7 @@ void dcn32_resync_fifo_dccg_dio(struct dce_hwseq *hws, struct dc *dc, struct dc_
continue;
if ((pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal))
- && pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ && dc_state_get_pipe_subvp_type(dc->current_state, pipe) != SUBVP_PHANTOM) {
pipe->stream_res.tg->funcs->disable_crtc(pipe->stream_res.tg);
reset_sync_context_for_pipe(dc, context, i);
otg_disabled[i] = true;
@@ -1345,8 +1342,8 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN &&
- pipe->stream->mall_stream_config.paired_stream == phantom_pipe->stream) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_MAIN &&
+ dc_state_get_paired_subvp_stream(context, pipe->stream) == phantom_pipe->stream) {
if (pipe->plane_state && pipe->plane_state->update_flags.bits.position_change) {
phantom_plane->src_rect.x = pipe->plane_state->src_rect.x;
@@ -1371,21 +1368,19 @@ void dcn32_update_phantom_vp_position(struct dc *dc,
void dcn32_apply_update_flags_for_phantom(struct pipe_ctx *phantom_pipe)
{
phantom_pipe->update_flags.raw = 0;
- if (phantom_pipe->stream && phantom_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) {
- phantom_pipe->update_flags.bits.enable = 1;
- phantom_pipe->update_flags.bits.mpcc = 1;
- phantom_pipe->update_flags.bits.dppclk = 1;
- phantom_pipe->update_flags.bits.hubp_interdependent = 1;
- phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
- phantom_pipe->update_flags.bits.gamut_remap = 1;
- phantom_pipe->update_flags.bits.scaler = 1;
- phantom_pipe->update_flags.bits.viewport = 1;
- phantom_pipe->update_flags.bits.det_size = 1;
- if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) {
- phantom_pipe->update_flags.bits.odm = 1;
- phantom_pipe->update_flags.bits.global_sync = 1;
- }
+ if (resource_is_pipe_type(phantom_pipe, DPP_PIPE)) {
+ phantom_pipe->update_flags.bits.enable = 1;
+ phantom_pipe->update_flags.bits.mpcc = 1;
+ phantom_pipe->update_flags.bits.dppclk = 1;
+ phantom_pipe->update_flags.bits.hubp_interdependent = 1;
+ phantom_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1;
+ phantom_pipe->update_flags.bits.gamut_remap = 1;
+ phantom_pipe->update_flags.bits.scaler = 1;
+ phantom_pipe->update_flags.bits.viewport = 1;
+ phantom_pipe->update_flags.bits.det_size = 1;
+ if (resource_is_pipe_type(phantom_pipe, OTG_MASTER)) {
+ phantom_pipe->update_flags.bits.odm = 1;
+ phantom_pipe->update_flags.bits.global_sync = 1;
}
}
}
@@ -1445,9 +1440,44 @@ void dcn32_update_dsc_pg(struct dc *dc,
}
}
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ int i;
+
+ for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx_old->stream)
+ continue;
+
+ if (dc_state_get_pipe_subvp_type(dc->current_state, pipe_ctx_old) != SUBVP_PHANTOM)
+ continue;
+
+ if (pipe_ctx_old->top_pipe || pipe_ctx_old->prev_odm_pipe)
+ continue;
+
+ if (!pipe_ctx->stream || pipe_need_reprogram(pipe_ctx_old, pipe_ctx) ||
+ (pipe_ctx->stream && dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)) {
+ struct clock_source *old_clk = pipe_ctx_old->clock_source;
+
+ if (hws->funcs.reset_back_end_for_pipe)
+ hws->funcs.reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
+ if (hws->funcs.enable_stream_gating)
+ hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
+ if (old_clk)
+ old_clk->funcs->cs_power_down(old_clk);
+ }
+ }
+}
+
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
{
unsigned int i;
+ enum dc_status status = DC_OK;
+ struct dce_hwseq *hws = dc->hwseq;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
@@ -1457,8 +1487,8 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
* pipe, wait for the double buffer update to complete first before we do
* ANY phantom pipe programming.
*/
- if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM &&
- old_pipe->stream && old_pipe->stream->mall_stream_config.type != SUBVP_PHANTOM) {
+ if (pipe->stream && dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM &&
+ old_pipe->stream && dc_state_get_pipe_subvp_type(dc->current_state, old_pipe) != SUBVP_PHANTOM) {
old_pipe->stream_res.tg->funcs->wait_for_state(
old_pipe->stream_res.tg,
CRTC_STATE_VBLANK);
@@ -1468,16 +1498,39 @@ void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context)
}
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
-
- if (new_pipe->stream && new_pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- // If old context or new context has phantom pipes, apply
- // the phantom timings now. We can't change the phantom
- // pipe configuration safely without driver acquiring
- // the DMCUB lock first.
- dc->hwss.apply_ctx_to_hw(dc, context);
- break;
+ struct pipe_ctx *pipe_ctx_old =
+ &dc->current_state->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx->stream == NULL)
+ continue;
+
+ if (dc_state_get_pipe_subvp_type(context, pipe_ctx) != SUBVP_PHANTOM)
+ continue;
+
+ if (pipe_ctx->stream == pipe_ctx_old->stream &&
+ pipe_ctx->stream->link->link_state_valid) {
+ continue;
}
+
+ if (pipe_ctx_old->stream && !pipe_need_reprogram(pipe_ctx_old, pipe_ctx))
+ continue;
+
+ if (pipe_ctx->top_pipe || pipe_ctx->prev_odm_pipe)
+ continue;
+
+ if (hws->funcs.apply_single_controller_ctx_to_hw)
+ status = hws->funcs.apply_single_controller_ctx_to_hw(
+ pipe_ctx,
+ context,
+ dc);
+
+ ASSERT(status == DC_OK);
+
+#ifdef CONFIG_DRM_AMD_DC_FP
+ if (hws->funcs.resync_fifo_dccg_dio)
+ hws->funcs.resync_fifo_dccg_dio(hws, dc, context);
+#endif
}
}
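
The new dcn32_disable_phantom_streams helper, together with the reworked per-pipe apply in dcn32_enable_phantom_streams above, resets the back end only for head pipes that were phantom in the current state and are gone or no longer phantom in the new one. A simplified sketch of that selection logic, omitting the pipe_need_reprogram check and using made-up types:

    #include <stdbool.h>
    #include <stdio.h>

    enum mall_type { TYPE_NONE, TYPE_MAIN, TYPE_PHANTOM };

    struct fake_pipe {
        bool has_stream;
        enum mall_type type;
        bool is_secondary;       /* stand-in for the top_pipe/prev_odm_pipe checks */
    };

    static bool needs_phantom_teardown(const struct fake_pipe *old_pipe,
                                       const struct fake_pipe *new_pipe)
    {
        if (!old_pipe->has_stream || old_pipe->type != TYPE_PHANTOM)
            return false;
        if (old_pipe->is_secondary)
            return false;                     /* only reset from the head pipe */
        return !new_pipe->has_stream || new_pipe->type != TYPE_PHANTOM;
    }

    int main(void)
    {
        struct fake_pipe was_phantom = { true, TYPE_PHANTOM, false };
        struct fake_pipe now_main    = { true, TYPE_MAIN,    false };

        printf("teardown needed: %s\n",
               needs_phantom_teardown(&was_phantom, &now_main) ? "yes" : "no");
        return 0;
    }
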
@@ -1691,3 +1744,26 @@ void dcn32_prepare_bandwidth(struct dc *dc,
context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
}
}
+
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock)
+{
+ unsigned int i;
+ struct pipe_ctx *pipe;
+ struct timing_generator *tg;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ pipe = &context->res_ctx.pipe_ctx[i];
+ tg = pipe->stream_res.tg;
+
+ if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+ !tg->funcs->is_tg_enabled(tg) ||
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+ continue;
+
+ if (lock)
+ dc->hwss.pipe_control_lock(dc, pipe, true);
+ else
+ dc->hwss.pipe_control_lock(dc, pipe, false);
+ }
+}
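
dcn32_interdependent_update_lock replaces dcn10_lock_all_pipes for DCN3.2 (see the dcn32_init.c hunk below) and only locks pipes that are OTG masters with a running timing generator and that are not SubVP phantoms. A sketch of the filter with simplified stand-in types; since both branches of the original loop end up calling pipe_control_lock, the sketch simply passes the lock flag straight through:

    #include <stdbool.h>
    #include <stdio.h>

    enum pipe_mall_type { PIPE_SUBVP_NONE, PIPE_SUBVP_MAIN, PIPE_SUBVP_PHANTOM };

    struct fake_otg_pipe {
        bool is_otg_master;
        bool tg_enabled;
        enum pipe_mall_type type;
    };

    static void lock_eligible_pipes(struct fake_otg_pipe *pipes, int count, bool lock)
    {
        for (int i = 0; i < count; i++) {
            struct fake_otg_pipe *pipe = &pipes[i];

            if (!pipe->is_otg_master || !pipe->tg_enabled ||
                pipe->type == PIPE_SUBVP_PHANTOM)
                continue;

            printf("pipe %d: %s\n", i, lock ? "lock" : "unlock");
        }
    }

    int main(void)
    {
        struct fake_otg_pipe pipes[] = {
            { true,  true, PIPE_SUBVP_MAIN },
            { true,  true, PIPE_SUBVP_PHANTOM },  /* skipped: phantom */
            { false, true, PIPE_SUBVP_NONE },     /* skipped: not an OTG master */
        };

        lock_eligible_pipes(pipes, 3, true);
        return 0;
    }
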
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index cecf7f0f5..f55c11fc5 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -111,6 +111,8 @@ void dcn32_update_dsc_pg(struct dc *dc,
void dcn32_enable_phantom_streams(struct dc *dc, struct dc_state *context);
+void dcn32_disable_phantom_streams(struct dc *dc, struct dc_state *context);
+
void dcn32_init_blank(
struct dc *dc,
struct timing_generator *tg);
@@ -127,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
void dcn32_prepare_bandwidth(struct dc *dc,
struct dc_state *context);
+void dcn32_interdependent_update_lock(struct dc *dc,
+ struct dc_state *context, bool lock);
#endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 427cfc8c2..03253faea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.disable_plane = dcn20_disable_plane,
.disable_pixel_data = dcn20_disable_pixel_data,
.pipe_control_lock = dcn20_pipe_control_lock,
- .interdependent_update_lock = dcn10_lock_all_pipes,
+ .interdependent_update_lock = dcn32_interdependent_update_lock,
.cursor_lock = dcn10_cursor_lock,
.prepare_bandwidth = dcn32_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
@@ -109,6 +109,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.commit_subvp_config = dcn32_commit_subvp_config,
.enable_phantom_streams = dcn32_enable_phantom_streams,
+ .disable_phantom_streams = dcn32_disable_phantom_streams,
.subvp_pipe_control_lock = dcn32_subvp_pipe_control_lock,
.update_visual_confirm_color = dcn10_update_visual_confirm_color,
.subvp_pipe_control_lock_fast = dcn32_subvp_pipe_control_lock_fast,
@@ -159,6 +160,8 @@ static const struct hwseq_private_funcs dcn32_private_funcs = {
.set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
.resync_fifo_dccg_dio = dcn32_resync_fifo_dccg_dio,
.is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .apply_single_controller_ctx_to_hw = dce110_apply_single_controller_ctx_to_hw,
+ .reset_back_end_for_pipe = dcn20_reset_back_end_for_pipe,
};
void dcn32_hw_sequencer_init_functions(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h
index 89a591eb2..89a591eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 325a711a1..1e67374b8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -56,6 +56,7 @@
#include "dcn30/dcn30_cm_common.h"
#include "dcn31/dcn31_hwseq.h"
#include "dcn20/dcn20_hwseq.h"
+#include "dc_state_priv.h"
#define DC_LOGGER_INIT(logger) \
struct dal_logger *dc_logger = logger
@@ -133,6 +134,7 @@ void dcn35_init_hw(struct dc *dc)
struct dc_bios *dcb = dc->ctx->dc_bios;
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
+ uint32_t user_level = MAX_BACKLIGHT_LEVEL;
int i;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
@@ -145,17 +147,36 @@ void dcn35_init_hw(struct dc *dc)
hws->funcs.bios_golden_init(dc);
}
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1,
- PHYBSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYCSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYDSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYESYMCLK_ROOT_GATE_DISABLE, 1);
+ if (!dc->debug.disable_clock_gate) {
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */
+ REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1,
+ PHYBSYMCLK_ROOT_GATE_DISABLE, 1,
+ PHYCSYMCLK_ROOT_GATE_DISABLE, 1,
+ PHYDSYMCLK_ROOT_GATE_DISABLE, 1,
+ PHYESYMCLK_ROOT_GATE_DISABLE, 1);
+
+ REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4,
+ DPIASYMCLK0_GATE_DISABLE, 0,
+ DPIASYMCLK1_GATE_DISABLE, 0,
+ DPIASYMCLK2_GATE_DISABLE, 0,
+ DPIASYMCLK3_GATE_DISABLE, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF);
+ REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
+ DTBCLK_P0_GATE_DISABLE, 0,
+ DTBCLK_P1_GATE_DISABLE, 0,
+ DTBCLK_P2_GATE_DISABLE, 0,
+ DTBCLK_P3_GATE_DISABLE, 0);
+ REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
+ DPSTREAMCLK0_GATE_DISABLE, 0,
+ DPSTREAMCLK1_GATE_DISABLE, 0,
+ DPSTREAMCLK2_GATE_DISABLE, 0,
+ DPSTREAMCLK3_GATE_DISABLE, 0);
- REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0x1f7c3fcf);
+ }
// Initialize the dccg
if (res_pool->dccg->funcs->dccg_init)
@@ -260,13 +281,15 @@ void dcn35_init_hw(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->panel_cntl)
+ if (link->panel_cntl) {
backlight = link->panel_cntl->funcs->hw_init(link->panel_cntl);
+ user_level = link->panel_cntl->stored_backlight_registers.USER_LEVEL;
+ }
}
if (dc->ctx->dmub_srv) {
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (abms[i] != NULL && abms[i]->funcs != NULL)
- abms[i]->funcs->abm_init(abms[i], backlight);
+ abms[i]->funcs->abm_init(abms[i], backlight, user_level);
}
}
@@ -332,9 +355,6 @@ void dcn35_init_hw(struct dc *dc)
if (dc->res_pool->pg_cntl) {
if (dc->res_pool->pg_cntl->funcs->init_pg_status)
dc->res_pool->pg_cntl->funcs->init_pg_status(dc->res_pool->pg_cntl);
-
- if (dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22)
- dc->res_pool->pg_cntl->funcs->set_force_poweron_domain22(dc->res_pool->pg_cntl, false);
}
}
@@ -619,7 +639,7 @@ void dcn35_power_down_on_boot(struct dc *dc)
bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
{
struct dc_link *edp_links[MAX_NUM_EDP];
- int edp_num;
+ int i, edp_num;
if (dc->debug.dmcub_emulation)
return true;
@@ -627,6 +647,13 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
dc_get_edp_links(dc, edp_links, &edp_num);
if (edp_num == 0 || edp_num > 1)
return false;
+
+ for (i = 0; i < dc->current_state->stream_count; ++i) {
+ struct dc_stream_state *stream = dc->current_state->streams[i];
+
+ if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal))
+ return false;
+ }
}
// TODO: review other cases when idle optimization is allowed
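
The added loop refuses idle power optimizations while any active stream drives a non-embedded output, on top of the existing single-eDP requirement. A standalone sketch of that eligibility check with made-up signal values:

    #include <stdbool.h>
    #include <stdio.h>

    enum signal_kind { SIGNAL_EDP, SIGNAL_HDMI, SIGNAL_DP };

    struct fake_stream {
        bool dpms_off;
        enum signal_kind signal;
    };

    static bool idle_opt_allowed(const struct fake_stream *streams, int count)
    {
        for (int i = 0; i < count; i++) {
            if (!streams[i].dpms_off && streams[i].signal != SIGNAL_EDP)
                return false;           /* an external display is still active */
        }
        return true;
    }

    int main(void)
    {
        struct fake_stream streams[] = {
            { .dpms_off = false, .signal = SIGNAL_EDP  },
            { .dpms_off = true,  .signal = SIGNAL_HDMI },
        };

        printf("idle optimizations allowed: %s\n",
               idle_opt_allowed(streams, 2) ? "yes" : "no");
        return 0;
    }
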
@@ -756,12 +783,12 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
- hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ hws->funcs.plane_atomic_disconnect(dc, context, pipe_ctx);
if (tg->funcs->is_tg_enabled(tg))
tg->funcs->unlock(tg);
- dc->hwss.disable_plane(dc, pipe_ctx);
+ dc->hwss.disable_plane(dc, context, pipe_ctx);
pipe_ctx->stream_res.tg = NULL;
pipe_ctx->plane_res.hubp = NULL;
@@ -888,10 +915,10 @@ void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_state = NULL;
}
-void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
+void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
- bool is_phantom = pipe_ctx->plane_state && pipe_ctx->plane_state->is_phantom;
+ bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
DC_LOGGER_INIT(dc->ctx->logger);
@@ -918,6 +945,8 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
bool hpo_frl_stream_enc_acquired = false;
bool hpo_dp_stream_enc_acquired = false;
int i = 0, j = 0;
+ int edp_num = 0;
+ struct dc_link *edp_links[MAX_NUM_EDP] = { NULL };
memset(update_state, 0, sizeof(struct pg_block_update));
@@ -958,10 +987,24 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
if (pipe_ctx->stream_res.opp)
update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false;
+ }
+ /*domain24 controls all the otg, mpc, opp, as long as one otg is still up, avoid enabling OTG PG*/
+ for (i = 0; i < dc->res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+ if (tg && tg->funcs->is_tg_enabled(tg)) {
+ update_state->pg_pipe_res_update[PG_OPTC][i] = false;
+ break;
+ }
+ }
- if (pipe_ctx->stream_res.tg)
- update_state->pg_pipe_res_update[PG_OPTC][pipe_ctx->stream_res.tg->inst] = false;
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ if (edp_num == 0 ||
+ ((!edp_links[0] || !edp_links[0]->edp_sink_present) &&
+ (!edp_links[1] || !edp_links[1]->edp_sink_present))) {
+		/*eDP does not exist on this config, keep Domain24 power on; for S0i3 this will be handled in dmubfw*/
+ update_state->pg_pipe_res_update[PG_OPTC][0] = false;
}
+
}
void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
@@ -1047,8 +1090,29 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
}
-void dcn35_block_power_control(struct dc *dc,
- struct pg_block_update *update_state, bool power_on)
+/**
+ * dcn35_hw_block_power_down() - power down sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power down:
+ *
+ * ONO Region 3, DCPG 25: hpo - SKIPPED
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will pwr dwn at IPS2 entry
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 11, DCPG 19: dsc3
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED. will be pwr dwn after lono timer is armed
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn35_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state)
{
int i = 0;
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
@@ -1057,64 +1121,106 @@ void dcn35_block_power_control(struct dc *dc,
return;
if (dc->debug.ignore_pg)
return;
+
if (update_state->pg_res_update[PG_HPO]) {
if (pg_cntl->funcs->hpo_pg_control)
- pg_cntl->funcs->hpo_pg_control(pg_cntl, power_on);
+ pg_cntl->funcs->hpo_pg_control(pg_cntl, false);
}
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
if (pg_cntl->funcs->hubp_dpp_pg_control)
- pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, power_on);
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, false);
}
-
+ }
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
if (update_state->pg_pipe_res_update[PG_DSC][i]) {
if (pg_cntl->funcs->dsc_pg_control)
- pg_cntl->funcs->dsc_pg_control(pg_cntl, i, power_on);
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, false);
}
- if (update_state->pg_pipe_res_update[PG_MPCC][i]) {
- if (pg_cntl->funcs->mpcc_pg_control)
- pg_cntl->funcs->mpcc_pg_control(pg_cntl, i, power_on);
- }
-
- if (update_state->pg_pipe_res_update[PG_OPP][i]) {
- if (pg_cntl->funcs->opp_pg_control)
- pg_cntl->funcs->opp_pg_control(pg_cntl, i, power_on);
- }
- if (update_state->pg_pipe_res_update[PG_OPTC][i]) {
- if (pg_cntl->funcs->optc_pg_control)
- pg_cntl->funcs->optc_pg_control(pg_cntl, i, power_on);
- }
- }
+	/*this will need all the clients to unregister optc interrupts, let dmubfw handle this*/
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, false);
- if (update_state->pg_res_update[PG_DWB]) {
- if (pg_cntl->funcs->dwb_pg_control)
- pg_cntl->funcs->dwb_pg_control(pg_cntl, power_on);
- }
+ //domain22, 23, 25 currently always on.
- if (pg_cntl->funcs->plane_otg_pg_control)
- pg_cntl->funcs->plane_otg_pg_control(pg_cntl, power_on);
}
-void dcn35_root_clock_control(struct dc *dc,
- struct pg_block_update *update_state, bool power_on)
+/**
+ * dcn35_hw_block_power_up() - power up sequence
+ *
+ * The following sequence describes the ON-OFF (ONO) for power up:
+ *
+ * ONO Region 0, DCPG 22: dccg dio dcio - SKIPPED
+ * ONO Region 2, DCPG 24: mpc opp optc dwb
+ * ONO Region 5, DCPG 16: dsc0
+ * ONO Region 7, DCPG 17: dsc1
+ * ONO Region 9, DCPG 18: dsc2
+ * ONO Region 11, DCPG 19: dsc3
+ * ONO Region 1, DCPG 23: dchubbub dchvm dchubbubmem - SKIPPED. PMFW will power up at IPS2 exit
+ * ONO Region 4, DCPG 0: dchubp0, dpp0
+ * ONO Region 6, DCPG 1: dchubp1, dpp1
+ * ONO Region 8, DCPG 2: dchubp2, dpp2
+ * ONO Region 10, DCPG 3: dchubp3, dpp3
+ * ONO Region 3, DCPG 25: hpo - SKIPPED
+ *
+ * @dc: Current DC state
+ * @update_state: update PG sequence states for HW block
+ */
+void dcn35_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state)
{
int i = 0;
struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
if (!pg_cntl)
return;
+ if (dc->debug.ignore_pg)
+ return;
+ //domain22, 23, 25 currently always on.
+	/*this will need all the clients to unregister optc interrupts, let dmubfw handle this*/
+ if (pg_cntl->funcs->plane_otg_pg_control)
+ pg_cntl->funcs->plane_otg_pg_control(pg_cntl, true);
+
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++)
+ if (update_state->pg_pipe_res_update[PG_DSC][i]) {
+ if (pg_cntl->funcs->dsc_pg_control)
+ pg_cntl->funcs->dsc_pg_control(pg_cntl, i, true);
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
update_state->pg_pipe_res_update[PG_DPP][i]) {
- if (dc->hwseq->funcs.dpp_root_clock_control)
- dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+ if (pg_cntl->funcs->hubp_dpp_pg_control)
+ pg_cntl->funcs->hubp_dpp_pg_control(pg_cntl, i, true);
}
+ }
+ if (update_state->pg_res_update[PG_HPO]) {
+ if (pg_cntl->funcs->hpo_pg_control)
+ pg_cntl->funcs->hpo_pg_control(pg_cntl, true);
+ }
+}
+void dcn35_root_clock_control(struct dc *dc,
+ struct pg_block_update *update_state, bool power_on)
+{
+ int i = 0;
+ struct pg_cntl *pg_cntl = dc->res_pool->pg_cntl;
+ if (!pg_cntl)
+ return;
+ /*enable root clock first when power up*/
+ if (power_on)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (dc->hwseq->funcs.dpp_root_clock_control)
+ dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+ }
+ }
+ for (i = 0; i < dc->res_pool->res_cap->num_dsc; i++) {
if (update_state->pg_pipe_res_update[PG_DSC][i]) {
if (power_on) {
if (dc->res_pool->dccg->funcs->enable_dsc)
@@ -1125,6 +1231,15 @@ void dcn35_root_clock_control(struct dc *dc,
}
}
}
+ /*disable root clock first when power down*/
+ if (!power_on)
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (update_state->pg_pipe_res_update[PG_HUBP][i] &&
+ update_state->pg_pipe_res_update[PG_DPP][i]) {
+ if (dc->hwseq->funcs.dpp_root_clock_control)
+ dc->hwseq->funcs.dpp_root_clock_control(dc->hwseq, i, power_on);
+ }
+ }
}
void dcn35_prepare_bandwidth(
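
The reworked dcn35_root_clock_control enables the HUBP/DPP root clocks before anything is powered up and gates them only after the power-down path has run, with the DSC clock handling kept in between. A sketch of that ordering with stub clock helpers (names and counts are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    static void root_clock(int inst, bool on)
    {
        printf("root clock %d -> %s\n", inst, on ? "on" : "off");
    }

    static void dsc_clock(int inst, bool on)
    {
        printf("dsc clock  %d -> %s\n", inst, on ? "on" : "off");
    }

    static void root_clock_control(int pipe_count, int dsc_count, bool power_on)
    {
        if (power_on)                          /* enable root clocks first */
            for (int i = 0; i < pipe_count; i++)
                root_clock(i, true);

        for (int i = 0; i < dsc_count; i++)
            dsc_clock(i, power_on);

        if (!power_on)                         /* gate root clocks last */
            for (int i = 0; i < pipe_count; i++)
                root_clock(i, false);
    }

    int main(void)
    {
        root_clock_control(2, 1, true);
        root_clock_control(2, 1, false);
        return 0;
    }
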
@@ -1138,9 +1253,9 @@ void dcn35_prepare_bandwidth(
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, true);
-
- if (dc->hwss.block_power_control)
- dc->hwss.block_power_control(dc, &pg_update_state, true);
+ /*power up required HW block*/
+ if (dc->hwss.hw_block_power_up)
+ dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
dcn20_prepare_bandwidth(dc, context);
@@ -1156,9 +1271,9 @@ void dcn35_optimize_bandwidth(
if (dc->hwss.calc_blocks_to_gate) {
dc->hwss.calc_blocks_to_gate(dc, context, &pg_update_state);
-
- if (dc->hwss.block_power_control)
- dc->hwss.block_power_control(dc, &pg_update_state, false);
+ /*try to power down unused block*/
+ if (dc->hwss.hw_block_power_down)
+ dc->hwss.hw_block_power_down(dc, &pg_update_state);
if (dc->hwss.root_clock_control)
dc->hwss.root_clock_control(dc, &pg_update_state, false);
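
Both call sites above switch from the single block_power_control(power_on) hook to the dedicated hw_block_power_up/hw_block_power_down pair, keeping the asymmetry against root_clock_control: clocks on before power-up, power-down before clocks off. A standalone sketch of the two call sites with stand-in hooks:

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_hwss {
        void (*root_clock_control)(bool on);
        void (*hw_block_power_up)(void);
        void (*hw_block_power_down)(void);
    };

    static void prepare_bandwidth(const struct fake_hwss *hwss)
    {
        hwss->root_clock_control(true);     /* clocks on before power-up */
        hwss->hw_block_power_up();
    }

    static void optimize_bandwidth(const struct fake_hwss *hwss)
    {
        hwss->hw_block_power_down();        /* power down before gating clocks */
        hwss->root_clock_control(false);
    }

    static void rcc(bool on)     { printf("root clocks %s\n", on ? "on" : "off"); }
    static void power_up(void)   { printf("blocks powered up\n"); }
    static void power_down(void) { printf("blocks powered down\n"); }

    int main(void)
    {
        struct fake_hwss hwss = { rcc, power_up, power_down };

        prepare_bandwidth(&hwss);
        optimize_bandwidth(&hwss);
        return 0;
    }
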
@@ -1180,3 +1295,44 @@ uint32_t dcn35_get_idle_state(const struct dc *dc)
return 0;
}
+
+void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, struct dc_crtc_timing_adjust adjust)
+{
+ int i = 0;
+ struct drr_params params = {0};
+ // DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
+ unsigned int event_triggers = 0x800;
+ // Note DRR trigger events are generated regardless of whether num frames met.
+ unsigned int num_frames = 2;
+
+ params.vertical_total_max = adjust.v_total_max;
+ params.vertical_total_min = adjust.v_total_min;
+ params.vertical_total_mid = adjust.v_total_mid;
+ params.vertical_total_mid_frame_num = adjust.v_total_mid_frame_num;
+
+ for (i = 0; i < num_pipes; i++) {
+ if ((pipe_ctx[i]->stream_res.tg != NULL) && pipe_ctx[i]->stream_res.tg->funcs) {
+ struct dc_crtc_timing *timing = &pipe_ctx[i]->stream->timing;
+ struct dc *dc = pipe_ctx[i]->stream->ctx->dc;
+
+ if (dc->debug.static_screen_wait_frames) {
+ unsigned int frame_rate = timing->pix_clk_100hz / (timing->h_total * timing->v_total);
+
+ if (frame_rate >= 120 && dc->caps.ips_support &&
+ dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
+ /*ips enable case*/
+ num_frames = 2 * (frame_rate % 60);
+ }
+ }
+ if (pipe_ctx[i]->stream_res.tg->funcs->set_drr)
+ pipe_ctx[i]->stream_res.tg->funcs->set_drr(
+ pipe_ctx[i]->stream_res.tg, &params);
+ if (adjust.v_total_max != 0 && adjust.v_total_min != 0)
+ if (pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control)
+ pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
+ pipe_ctx[i]->stream_res.tg,
+ event_triggers, num_frames);
+ }
+ }
+}
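
The new dcn35_set_drr keeps the default of two static-screen frames but, when IPS is supported and the panel runs at 120 Hz or more, derives a larger count with the same arithmetic as the hunk above. A small sketch of that selection (the helper and its arguments are illustrative, not the driver's interface):

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int pick_num_frames(unsigned int frame_rate,
                                        bool ips_supported,
                                        bool static_screen_wait_frames)
    {
        unsigned int num_frames = 2;     /* default, as in the hunk above */

        if (static_screen_wait_frames && frame_rate >= 120 && ips_supported)
            num_frames = 2 * (frame_rate % 60);

        return num_frames;
    }

    int main(void)
    {
        printf("60 Hz : %u frames\n", pick_num_frames(60, true, true));
        printf("144 Hz: %u frames\n", pick_num_frames(144, true, true));
        return 0;
    }
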
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
index 0dff10d17..fd66316e3 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.h
@@ -57,14 +57,16 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context);
void dcn35_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn35_enable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx,
struct dc_state *context);
-void dcn35_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context,
struct pg_block_update *update_state);
void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context,
struct pg_block_update *update_state);
-void dcn35_block_power_control(struct dc *dc,
- struct pg_block_update *update_state, bool power_on);
+void dcn35_hw_block_power_up(struct dc *dc,
+ struct pg_block_update *update_state);
+void dcn35_hw_block_power_down(struct dc *dc,
+ struct pg_block_update *update_state);
void dcn35_root_clock_control(struct dc *dc,
struct pg_block_update *update_state, bool power_on);
@@ -84,4 +86,8 @@ void dcn35_dsc_pg_control(
void dcn35_set_idle_state(const struct dc *dc, bool allow_idle);
uint32_t dcn35_get_idle_state(const struct dc *dc);
+
+void dcn35_set_drr(struct pipe_ctx **pipe_ctx,
+ int num_pipes, struct dc_crtc_timing_adjust adjust);
+
#endif /* __DC_HWSS_DCN35_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
index 296bf3a38..a630aa77d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.c
@@ -68,7 +68,7 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.prepare_bandwidth = dcn35_prepare_bandwidth,
.optimize_bandwidth = dcn35_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
- .set_drr = dcn10_set_drr,
+ .set_drr = dcn35_set_drr,
.get_position = dcn10_get_position,
.set_static_screen_control = dcn30_set_static_screen_control,
.setup_stereo = dcn10_setup_stereo,
@@ -118,7 +118,8 @@ static const struct hw_sequencer_funcs dcn35_funcs = {
.update_dsc_pg = dcn32_update_dsc_pg,
.calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
.calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
- .block_power_control = dcn35_block_power_control,
+ .hw_block_power_up = dcn35_hw_block_power_up,
+ .hw_block_power_down = dcn35_hw_block_power_down,
.root_clock_control = dcn35_root_clock_control,
.set_idle_state = dcn35_set_idle_state,
.get_idle_state = dcn35_get_idle_state
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h
index b67015032..b67015032 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_init.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_init.h
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
new file mode 100644
index 000000000..951ca2da4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dcn351_init.c
+ dcn351_init.h
+)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
new file mode 100644
index 000000000..b24ad27fe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/Makefile
@@ -0,0 +1,17 @@
+#
+# (c) Copyright 2022 Advanced Micro Devices, Inc. All the rights reserved
+#
+# All rights reserved. This notice is intended as a precaution against
+# inadvertent publication and does not imply publication or any waiver
+# of confidentiality. The year included in the foregoing notice is the
+# year of creation of the work.
+#
+# Authors: AMD
+#
+# Makefile for DCN351.
+
+DCN351 = dcn351_init.o
+
+AMD_DAL_DCN351 = $(addprefix $(AMDDALPATH)/dc/dcn351/,$(DCN351))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN351)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
new file mode 100644
index 000000000..143d3fc02
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -0,0 +1,171 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hwseq.h"
+#include "dcn10/dcn10_hwseq.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn21/dcn21_hwseq.h"
+#include "dcn30/dcn30_hwseq.h"
+#include "dcn301/dcn301_hwseq.h"
+#include "dcn31/dcn31_hwseq.h"
+#include "dcn32/dcn32_hwseq.h"
+#include "dcn35/dcn35_hwseq.h"
+
+#include "dcn351_init.h"
+
+static const struct hw_sequencer_funcs dcn351_funcs = {
+ .program_gamut_remap = dcn30_program_gamut_remap,
+ .init_hw = dcn35_init_hw,
+ .power_down_on_boot = dcn35_power_down_on_boot,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dcn31_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dcn20_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn32_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn35_disable_plane,
+ .disable_pixel_data = dcn20_disable_pixel_data,
+ .pipe_control_lock = dcn20_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
+ .prepare_bandwidth = dcn35_prepare_bandwidth,
+ .optimize_bandwidth = dcn35_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn30_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dcn30_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_T12 = dce110_edp_wait_for_T12,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn10_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .enable_writeback = dcn30_enable_writeback,
+ .disable_writeback = dcn30_disable_writeback,
+ .update_writeback = dcn30_update_writeback,
+ .mmhubbub_warmup = dcn30_mmhubbub_warmup,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .program_dmdata_engine = dcn30_program_dmdata_engine,
+ .set_dmdata_attributes = dcn20_set_dmdata_attributes,
+ .init_sys_ctx = dcn31_init_sys_ctx,
+ .init_vm_ctx = dcn20_init_vm_ctx,
+ .set_flip_control_gsl = dcn20_set_flip_control_gsl,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .power_down = dce110_power_down,
+ .set_backlight_level = dcn21_set_backlight_level,
+ .set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
+ .set_pipe = dcn21_set_pipe,
+ .enable_lvds_link_output = dce110_enable_lvds_link_output,
+ .enable_tmds_link_output = dce110_enable_tmds_link_output,
+ .enable_dp_link_output = dce110_enable_dp_link_output,
+ .disable_link_output = dcn32_disable_link_output,
+ .z10_restore = dcn35_z10_restore,
+ .z10_save_init = dcn31_z10_save_init,
+ .set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
+ .optimize_pwr_state = dcn21_optimize_pwr_state,
+ .exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state,
+ .update_visual_confirm_color = dcn10_update_visual_confirm_color,
+ .apply_idle_power_optimizations = dcn35_apply_idle_power_optimizations,
+ .update_dsc_pg = dcn32_update_dsc_pg,
+ .calc_blocks_to_gate = dcn35_calc_blocks_to_gate,
+ .calc_blocks_to_ungate = dcn35_calc_blocks_to_ungate,
+ .hw_block_power_up = dcn35_hw_block_power_up,
+ .hw_block_power_down = dcn35_hw_block_power_down,
+ .root_clock_control = dcn35_root_clock_control,
+ .set_idle_state = dcn35_set_idle_state,
+ .get_idle_state = dcn35_get_idle_state
+};
+
+static const struct hwseq_private_funcs dcn351_private_funcs = {
+ .init_pipes = dcn35_init_pipes,
+ .update_plane_addr = dcn20_update_plane_addr,
+ .plane_atomic_disconnect = dcn10_plane_atomic_disconnect,
+ .update_mpcc = dcn20_update_mpcc,
+ .set_input_transfer_func = dcn32_set_input_transfer_func,
+ .set_output_transfer_func = dcn32_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn31_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn20_init_blank,
+ .disable_vga = NULL,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn35_plane_atomic_disable,
+ //.plane_atomic_disable = dcn20_plane_atomic_disable,/*todo*/
+ //.hubp_pg_control = dcn35_hubp_pg_control,
+ .enable_power_gating_plane = dcn35_enable_power_gating_plane,
+ .dpp_root_clock_control = dcn35_dpp_root_clock_control,
+ .program_all_writeback_pipes_in_tree = dcn30_program_all_writeback_pipes_in_tree,
+ .update_odm = dcn35_update_odm,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_mcm_luts = dcn32_set_mcm_luts,
+ .setup_hpo_hw_control = dcn35_setup_hpo_hw_control,
+ .calculate_dccg_k1_k2_values = dcn32_calculate_dccg_k1_k2_values,
+ .set_pixels_per_cycle = dcn32_set_pixels_per_cycle,
+ .is_dp_dig_pixel_rate_div_policy = dcn32_is_dp_dig_pixel_rate_div_policy,
+ .dsc_pg_control = dcn35_dsc_pg_control,
+ .dsc_pg_status = dcn32_dsc_pg_status,
+ .enable_plane = dcn35_enable_plane,
+};
+
+void dcn351_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn351_funcs;
+ dc->hwseq->funcs = dcn351_private_funcs;
+
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h
new file mode 100644
index 000000000..970b01008
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN351_INIT_H__
+#define __DC_DCN351_INIT_H__
+
+struct dc;
+
+void dcn351_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN351_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
index 452680fe9..64ca7c665 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h
@@ -50,7 +50,7 @@ struct pg_block_update;
struct subvp_pipe_control_lock_fast_params {
struct dc *dc;
bool lock;
- struct pipe_ctx *pipe_ctx;
+ bool subvp_immediate_flip;
};
struct pipe_control_lock_params {
@@ -200,7 +200,7 @@ struct hw_sequencer_funcs {
struct dc_state *context);
enum dc_status (*apply_ctx_to_hw)(struct dc *dc,
struct dc_state *context);
- void (*disable_plane)(struct dc *dc, struct pipe_ctx *pipe_ctx);
+ void (*disable_plane)(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx);
void (*disable_pixel_data)(struct dc *dc, struct pipe_ctx *pipe_ctx, bool blank);
void (*apply_ctx_for_surface)(struct dc *dc,
const struct dc_stream_state *stream,
@@ -248,6 +248,7 @@ struct hw_sequencer_funcs {
void (*enable_per_frame_crtc_position_reset)(struct dc *dc,
int group_size, struct pipe_ctx *grouped_pipes[]);
void (*enable_timing_synchronization)(struct dc *dc,
+ struct dc_state *state,
int group_index, int group_size,
struct pipe_ctx *grouped_pipes[]);
void (*enable_vblanks_synchronization)(struct dc *dc,
@@ -378,6 +379,7 @@ struct hw_sequencer_funcs {
struct dc_cursor_attributes *cursor_attr);
void (*commit_subvp_config)(struct dc *dc, struct dc_state *context);
void (*enable_phantom_streams)(struct dc *dc, struct dc_state *context);
+ void (*disable_phantom_streams)(struct dc *dc, struct dc_state *context);
void (*subvp_pipe_control_lock)(struct dc *dc,
struct dc_state *context,
bool lock,
@@ -414,8 +416,10 @@ struct hw_sequencer_funcs {
struct pg_block_update *update_state);
void (*calc_blocks_to_ungate)(struct dc *dc, struct dc_state *context,
struct pg_block_update *update_state);
- void (*block_power_control)(struct dc *dc,
- struct pg_block_update *update_state, bool power_on);
+ void (*hw_block_power_up)(struct dc *dc,
+ struct pg_block_update *update_state);
+ void (*hw_block_power_down)(struct dc *dc,
+ struct pg_block_update *update_state);
void (*root_clock_control)(struct dc *dc,
struct pg_block_update *update_state, bool power_on);
void (*set_idle_state)(const struct dc *dc, bool allow_idle);
@@ -452,17 +456,18 @@ void get_mpctree_visual_confirm_color(
struct tg_color *color);
void get_subvp_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
void get_mclk_switch_visual_confirm_color(
- struct dc *dc,
- struct dc_state *context,
struct pipe_ctx *pipe_ctx,
struct tg_color *color);
+void set_p_state_switch_method(
+ struct dc *dc,
+ struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
+
void hwss_execute_sequence(struct dc *dc,
struct block_sequence block_sequence[],
int num_steps);
@@ -472,7 +477,8 @@ void hwss_build_fast_sequence(struct dc *dc,
unsigned int dmub_cmd_count,
struct block_sequence block_sequence[],
int *num_steps,
- struct pipe_ctx *pipe_ctx);
+ struct pipe_ctx *pipe_ctx,
+ struct dc_stream_status *stream_status);
void hwss_send_dmcub_cmd(union block_sequence_params *params);
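The block_power_control hook is split here into hw_block_power_up and hw_block_power_down. A minimal sketch of how a call site might adapt; the wrapper name below is hypothetical, only the two hook names and the pg_block_update argument come from this header change:

static void example_power_gate_blocks(struct dc *dc,
		struct pg_block_update *update_state, bool power_on)
{
	/* the former block_power_control(dc, update_state, power_on) call
	 * now selects one of the two split hooks */
	if (power_on) {
		if (dc->hwss.hw_block_power_up)
			dc->hwss.hw_block_power_up(dc, update_state);
	} else {
		if (dc->hwss.hw_block_power_down)
			dc->hwss.hw_block_power_down(dc, update_state);
	}
}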
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
index 82c592166..b3c62a82c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer_private.h
@@ -79,6 +79,7 @@ struct hwseq_private_funcs {
void (*update_plane_addr)(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
void (*plane_atomic_disconnect)(struct dc *dc,
+ struct dc_state *state,
struct pipe_ctx *pipe_ctx);
void (*update_mpcc)(struct dc *dc, struct pipe_ctx *pipe_ctx);
bool (*set_input_transfer_func)(struct dc *dc,
@@ -164,8 +165,15 @@ struct hwseq_private_funcs {
void (*set_pixels_per_cycle)(struct pipe_ctx *pipe_ctx);
void (*resync_fifo_dccg_dio)(struct dce_hwseq *hws, struct dc *dc,
struct dc_state *context);
+ enum dc_status (*apply_single_controller_ctx_to_hw)(
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context,
+ struct dc *dc);
bool (*is_dp_dig_pixel_rate_div_policy)(struct pipe_ctx *pipe_ctx);
#endif
+ void (*reset_back_end_for_pipe)(struct dc *dc,
+ struct pipe_ctx *pipe_ctx,
+ struct dc_state *context);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index bc9cda329..3a6bf77a6 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -200,11 +200,7 @@ struct resource_funcs {
unsigned int pipe_cnt,
unsigned int index);
- bool (*remove_phantom_pipes)(struct dc *dc, struct dc_state *context, bool fast_update);
- void (*retain_phantom_pipes)(struct dc *dc, struct dc_state *context);
void (*get_panel_config_defaults)(struct dc_panel_config *panel_config);
- void (*save_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);
- void (*restore_mall_state)(struct dc *dc, struct dc_state *context, struct mall_temp_config *temp_config);
void (*build_pipe_pix_clk_params)(struct pipe_ctx *pipe_ctx);
};
@@ -385,6 +381,16 @@ union pipe_update_flags {
uint32_t raw;
};
+enum p_state_switch_method {
+ P_STATE_UNKNOWN = 0,
+ P_STATE_V_BLANK = 1,
+ P_STATE_FPO,
+ P_STATE_V_ACTIVE,
+ P_STATE_SUB_VP,
+ P_STATE_DRR_SUB_VP,
+ P_STATE_V_BLANK_SUB_VP
+};
+
struct pipe_ctx {
struct dc_plane_state *plane_state;
struct dc_stream_state *stream;
@@ -433,6 +439,7 @@ struct pipe_ctx {
struct dwbc *dwbc;
struct mcif_wb *mcif_wb;
union pipe_update_flags update_flags;
+ enum p_state_switch_method p_state_type;
struct tg_color visual_confirm_color;
bool has_vactive_margin;
/* subvp_index: only valid if the pipe is a SUBVP_MAIN*/
@@ -528,6 +535,14 @@ struct dc_state {
* @stream_status: Planes status on a given stream
*/
struct dc_stream_status stream_status[MAX_PIPES];
+ /**
+ * @phantom_streams: Stream state properties for phantoms
+ */
+ struct dc_stream_state *phantom_streams[MAX_PHANTOM_PIPES];
+ /**
+ * @phantom_planes: Planes state properties for phantoms
+ */
+ struct dc_plane_state *phantom_planes[MAX_PHANTOM_PIPES];
/**
* @stream_count: Total of streams in use
@@ -536,6 +551,14 @@ struct dc_state {
uint8_t stream_mask;
/**
+ * @phantom_stream_count: Total phantom streams in use
+ */
+ uint8_t phantom_stream_count;
+ /**
+ * @phantom_plane_count: Total phantom planes in use
+ */
+ uint8_t phantom_plane_count;
+ /**
* @res_ctx: Persistent state of resources
*/
struct resource_context res_ctx;
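The new per-pipe p_state_type feeds the p-state visual-confirm paths declared in hw_sequencer.h above. A minimal sketch of a consumer, assuming a pipe_ctx and a tg_color in scope; the function name and color values are placeholders:

static void example_p_state_confirm_color(const struct pipe_ctx *pipe_ctx,
		struct tg_color *color)
{
	switch (pipe_ctx->p_state_type) {
	case P_STATE_SUB_VP:
		color->color_r_cr = 0xff;	/* placeholder value */
		break;
	case P_STATE_FPO:
		color->color_g_y = 0xff;	/* placeholder value */
		break;
	default:
		break;
	}
}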
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index 9f521cf0f..3f0161d64 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -36,7 +36,7 @@ struct abm {
};
struct abm_funcs {
- void (*abm_init)(struct abm *abm, uint32_t back_light);
+ void (*abm_init)(struct abm *abm, uint32_t back_light, uint32_t user_level);
bool (*set_abm_level)(struct abm *abm, unsigned int abm_level);
bool (*set_abm_immediate_disable)(struct abm *abm, unsigned int panel_inst);
bool (*set_pipe)(struct abm *abm, unsigned int controller_id, unsigned int panel_inst);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 55ded5fb8..17e014d3b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -62,6 +62,25 @@ struct dcn3_clk_internal {
uint32_t CLK4_CLK0_CURRENT_CNT; //fclk
};
+struct dcn35_clk_internal {
+ int dummy;
+ uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk
+ uint32_t CLK1_CLK1_CURRENT_CNT; //dppclk
+ uint32_t CLK1_CLK2_CURRENT_CNT; //dprefclk
+ uint32_t CLK1_CLK3_CURRENT_CNT; //dcfclk
+ uint32_t CLK1_CLK4_CURRENT_CNT; //dtbclk
+ //uint32_t CLK1_CLK5_CURRENT_CNT; //dpiaclk
+ //uint32_t CLK1_CLK6_CURRENT_CNT; //srdbgclk
+ uint32_t CLK1_CLK3_DS_CNTL; //dcf_deep_sleep_divider
+ uint32_t CLK1_CLK3_ALLOW_DS; //dcf_deep_sleep_allow
+
+ uint32_t CLK1_CLK0_BYPASS_CNTL; //dispclk bypass
+ uint32_t CLK1_CLK1_BYPASS_CNTL; //dppclk bypass
+ uint32_t CLK1_CLK2_BYPASS_CNTL; //dprefclk bypass
+ uint32_t CLK1_CLK3_BYPASS_CNTL; //dcfclk bypass
+ uint32_t CLK1_CLK4_BYPASS_CNTL; //dtbclk bypass
+};
+
struct dcn301_clk_internal {
int dummy;
uint32_t CLK1_CLK0_CURRENT_CNT; //dispclk
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 6b44557fc..b9a06bf84 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -59,8 +59,8 @@ enum dentist_dispclk_change_mode {
struct dp_dto_params {
int otg_inst;
enum signal_type signal;
- long long pixclk_hz;
- long long refclk_hz;
+ uint64_t pixclk_hz;
+ uint64_t refclk_hz;
};
enum pixel_rate_div {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 86b711dcc..729ca0064 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -188,6 +188,10 @@ struct dwbc_funcs {
bool (*is_enabled)(
struct dwbc *dwbc);
+ void (*set_fc_enable)(
+ struct dwbc *dwbc,
+ enum dwb_frame_capture_enable enable);
+
void (*set_stereo)(
struct dwbc *dwbc,
struct dwb_stereo_params *stereo_params);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index b95ae9596..dcae23fae 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -43,6 +43,7 @@
* to be used inside loops and for determining array sizes.
*/
#define MAX_PIPES 6
+#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
#define MAX_HPO_DP2_ENCODERS 4
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 7617fabbd..071792081 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -321,6 +321,9 @@ struct opp_funcs {
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
+ bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
+
void (*opp_dpg_set_blank_color)(
struct output_pixel_processor *opp,
const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
index 660897e12..e97d964a1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/panel_cntl.h
@@ -40,6 +40,7 @@ struct panel_cntl_backlight_registers {
unsigned int BL_PWM_PERIOD_CNTL;
unsigned int LVTMA_PWRSEQ_REF_DIV_BL_PWM_REF_DIV;
unsigned int PANEL_PWRSEQ_REF_DIV2;
+ unsigned int USER_LEVEL;
};
struct panel_cntl_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
index b9812afb8..00ea3864d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h
@@ -47,8 +47,6 @@ struct pg_cntl_funcs {
void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on);
void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on);
void (*init_pg_status)(struct pg_cntl *pg_cntl);
-
- void (*set_force_poweron_domain22)(struct pg_cntl *pg_cntl, bool power_on);
};
#endif //__DC_PG_CNTL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 9a00a9931..cad3e5f14 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -333,6 +333,7 @@ struct timing_generator_funcs {
void (*init_odm)(struct timing_generator *tg);
void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg);
+ void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg);
};
#endif
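wait_odm_doublebuffer_pending_clear is an optional hook like its DRR counterpart, so callers are expected to guard it. A minimal sketch, assuming a pipe_ctx in scope:

	if (pipe_ctx->stream_res.tg->funcs->wait_odm_doublebuffer_pending_clear)
		pipe_ctx->stream_res.tg->funcs->wait_odm_doublebuffer_pending_clear(
				pipe_ctx->stream_res.tg);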
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h
index d76853681..bf29fc58e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link.h
@@ -281,11 +281,16 @@ struct link_service {
const unsigned int *power_opts);
bool (*edp_setup_replay)(struct dc_link *link,
const struct dc_stream_state *stream);
+ bool (*edp_send_replay_cmd)(struct dc_link *link,
+ enum replay_FW_Message_type msg,
+ union dmub_replay_cmd_set *cmd_data);
bool (*edp_set_coasting_vtotal)(
- struct dc_link *link, uint16_t coasting_vtotal);
+ struct dc_link *link, uint32_t coasting_vtotal);
bool (*edp_replay_residency)(const struct dc_link *link,
unsigned int *residency, const bool is_start,
const bool is_alpm);
+ bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link,
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool (*edp_wait_for_t12)(struct dc_link *link);
bool (*edp_is_ilr_optimization_required)(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 3d7244393..77a60aa9f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -497,6 +497,18 @@ int recource_find_free_pipe_not_used_in_cur_res_ctx(
const struct resource_pool *pool);
/*
+ * Look for a free pipe in new resource context that is used in current resource
+ * context as an OTG master pipe.
+ *
+ * return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
+ * pipe idx of the free pipe
+ */
+int recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
+ const struct resource_context *cur_res_ctx,
+ struct resource_context *new_res_ctx,
+ const struct resource_pool *pool);
+
+/*
* Look for a free pipe in new resource context that is used as a secondary DPP
* pipe in any MPCC combine in current resource context.
* return - FREE_PIPE_INDEX_NOT_FOUND if free pipe is not found, otherwise
@@ -557,9 +569,6 @@ void update_audio_usage(
unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
-void get_audio_check(struct audio_info *aud_modes,
- struct audio_check *aud_chk);
-
bool get_temp_dp_link_res(struct dc_link *link,
struct link_resource *link_res,
struct dc_link_settings *link_settings);
@@ -606,5 +615,4 @@ enum dc_status update_dp_encoder_resources_for_test_harness(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool check_subvp_sw_cursor_fallback_req(const struct dc *dc, struct dc_stream_state *stream);
-
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
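A minimal sketch of using the new OTG-master free-pipe helper; the wrapper and context names are hypothetical, while the helper signature and FREE_PIPE_INDEX_NOT_FOUND come from this header:

static struct pipe_ctx *example_reuse_cur_otg_master(const struct dc_state *cur_ctx,
		struct dc_state *new_ctx, const struct resource_pool *pool)
{
	int i = recource_find_free_pipe_used_as_otg_master_in_cur_res_ctx(
			&cur_ctx->res_ctx, &new_ctx->res_ctx, pool);

	if (i == FREE_PIPE_INDEX_NOT_FOUND)
		return NULL;

	return &new_ctx->res_ctx.pipe_ctx[i];
}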
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index 007ee32c2..3cbfbf8d1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -1279,86 +1279,6 @@ static void remove_stream_from_alloc_table(
}
}
-static enum dc_status deallocate_mst_payload_with_temp_drm_wa(
- struct pipe_ctx *pipe_ctx)
-{
- struct dc_stream_state *stream = pipe_ctx->stream;
- struct dc_link *link = stream->link;
- struct dc_dp_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
- int i;
- bool mst_mode = (link->type == dc_connection_mst_branch);
- /* adjust for drm changes*/
- const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- const struct dc_link_settings empty_link_settings = {0};
- DC_LOGGER_INIT(link->ctx->logger);
-
- if (link_hwss->ext.set_throttled_vcp_size)
- link_hwss->ext.set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
- if (link_hwss->ext.set_hblank_min_symbol_width)
- link_hwss->ext.set_hblank_min_symbol_width(pipe_ctx,
- &empty_link_settings,
- avg_time_slots_per_mtp);
-
- if (dm_helpers_dp_mst_write_payload_allocation_table(
- stream->ctx,
- stream,
- &proposed_table,
- false))
- update_mst_stream_alloc_table(
- link,
- pipe_ctx->stream_res.stream_enc,
- pipe_ctx->stream_res.hpo_dp_stream_enc,
- &proposed_table);
- else
- DC_LOG_WARNING("Failed to update"
- "MST allocation table for"
- "pipe idx:%d\n",
- pipe_ctx->pipe_idx);
-
- DC_LOG_MST("%s"
- "stream_count: %d: ",
- __func__,
- link->mst_stream_alloc_table.stream_count);
-
- for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
- DC_LOG_MST("stream_enc[%d]: %p "
- "stream[%d].hpo_dp_stream_enc: %p "
- "stream[%d].vcp_id: %d "
- "stream[%d].slot_count: %d\n",
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
- i,
- (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
- i,
- link->mst_stream_alloc_table.stream_allocations[i].slot_count);
- }
-
- if (link_hwss->ext.update_stream_allocation_table == NULL ||
- link_dp_get_encoding_format(&link->cur_link_settings) == DP_UNKNOWN_ENCODING) {
- DC_LOG_DEBUG("Unknown encoding format\n");
- return DC_ERROR_UNEXPECTED;
- }
-
- link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
- &link->mst_stream_alloc_table);
-
- if (mst_mode) {
- dm_helpers_dp_mst_poll_for_allocation_change_trigger(
- stream->ctx,
- stream);
- }
-
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- false);
-
- return DC_OK;
-}
-
static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -1371,9 +1291,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
const struct dc_link_settings empty_link_settings = {0};
DC_LOGGER_INIT(link->ctx->logger);
- if (link->dc->debug.temp_mst_deallocation_sequence)
- return deallocate_mst_payload_with_temp_drm_wa(pipe_ctx);
-
/* deallocate_mst_payload is called before disable link. When mode or
* disable/enable monitor, new stream is created which is not in link
* stream[] yet. For this, payload is not allocated yet, so de-alloc
@@ -1446,16 +1363,14 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link_hwss->ext.update_stream_allocation_table(link, &pipe_ctx->link_res,
&link->mst_stream_alloc_table);
- if (mst_mode) {
+ if (mst_mode)
dm_helpers_dp_mst_poll_for_allocation_change_trigger(
stream->ctx,
stream);
- dm_helpers_dp_mst_send_payload_allocation(
- stream->ctx,
- stream,
- false);
- }
+ dm_helpers_dp_mst_update_mst_mgr_for_deallocation(
+ stream->ctx,
+ stream);
return DC_OK;
}
@@ -1536,12 +1451,10 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx)
stream->ctx,
stream);
- if (ret != ACT_LINK_LOST) {
+ if (ret != ACT_LINK_LOST)
dm_helpers_dp_mst_send_payload_allocation(
stream->ctx,
- stream,
- true);
- }
+ stream);
/* slot X.Y for only current stream */
pbn_per_slot = get_pbn_per_slot(stream);
@@ -1801,8 +1714,7 @@ enum dc_status link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in
/* send ALLOCATE_PAYLOAD sideband message with updated pbn */
dm_helpers_dp_mst_send_payload_allocation(
stream->ctx,
- stream,
- true);
+ stream);
/* notify immediate branch device table update */
if (dm_helpers_dp_mst_write_payload_allocation_table(
@@ -1931,8 +1843,7 @@ enum dc_status link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_
/* send ALLOCATE_PAYLOAD sideband message with updated pbn */
dm_helpers_dp_mst_send_payload_allocation(
stream->ctx,
- stream,
- true);
+ stream);
}
/* increase throttled vcp size */
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
index 41f8230f9..cf22b8f28 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c
@@ -213,8 +213,10 @@ static void construct_link_service_edp_panel_control(struct link_service *link_s
link_srv->edp_get_replay_state = edp_get_replay_state;
link_srv->edp_set_replay_allow_active = edp_set_replay_allow_active;
link_srv->edp_setup_replay = edp_setup_replay;
+ link_srv->edp_send_replay_cmd = edp_send_replay_cmd;
link_srv->edp_set_coasting_vtotal = edp_set_coasting_vtotal;
link_srv->edp_replay_residency = edp_replay_residency;
+ link_srv->edp_set_replay_power_opt_and_coasting_vtotal = edp_set_replay_power_opt_and_coasting_vtotal;
link_srv->edp_wait_for_t12 = edp_wait_for_t12;
link_srv->edp_is_ilr_optimization_required =
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
index 4a954317d..595fb0594 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h
+++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h
@@ -25,6 +25,7 @@
#ifndef __LINK_VALIDATION_H__
#define __LINK_VALIDATION_H__
#include "link.h"
+
enum dc_status link_validate_mode_timing(
const struct dc_stream_state *stream,
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index 2f11eaabb..289f5d133 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -412,12 +412,18 @@ static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link)
{
enum dc_link_rate cable_max_link_rate = LINK_RATE_UNKNOWN;
- if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20)
+ if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR20) {
cable_max_link_rate = LINK_RATE_UHBR20;
- else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
+ } else if (link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY) {
cable_max_link_rate = LINK_RATE_UHBR13_5;
- else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10)
- cable_max_link_rate = LINK_RATE_UHBR10;
+ } else if (link->dpcd_caps.cable_id.bits.UHBR10_20_CAPABILITY & DP_UHBR10) {
+ // allow DP40 cables to do UHBR13.5 for passive or unknown cable type
+ if (link->dpcd_caps.cable_id.bits.CABLE_TYPE < 2) {
+ cable_max_link_rate = LINK_RATE_UHBR13_5;
+ } else {
+ cable_max_link_rate = LINK_RATE_UHBR10;
+ }
+ }
return cable_max_link_rate;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
index 982eda3c4..6af42ba98 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c
@@ -82,25 +82,33 @@ bool dpia_query_hpd_status(struct dc_link *link)
{
union dmub_rb_cmd cmd = {0};
struct dc_dmub_srv *dmub_srv = link->ctx->dmub_srv;
- bool is_hpd_high = false;
/* prepare QUERY_HPD command */
cmd.query_hpd.header.type = DMUB_CMD__QUERY_HPD_STATE;
cmd.query_hpd.data.instance = link->link_id.enum_id - ENUM_ID_1;
cmd.query_hpd.data.ch_type = AUX_CHANNEL_DPIA;
- /* Return HPD status reported by DMUB if query successfully executed. */
- if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
- cmd.query_hpd.data.status == AUX_RET_SUCCESS)
- is_hpd_high = cmd.query_hpd.data.result;
-
- DC_LOG_DEBUG("%s: link(%d) dpia(%d) cmd_status(%d) result(%d)\n",
- __func__,
- link->link_index,
- link->link_id.enum_id - ENUM_ID_1,
- cmd.query_hpd.data.status,
- cmd.query_hpd.data.result);
-
- return is_hpd_high;
+ /* Query dpia hpd status from dmub */
+ if (dc_wake_and_execute_dmub_cmd(dmub_srv->ctx, &cmd,
+ DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) &&
+ cmd.query_hpd.data.status == AUX_RET_SUCCESS) {
+ DC_LOG_DEBUG("%s: for link(%d) dpia(%d) success, current_hpd_status(%d) new_hpd_status(%d)\n",
+ __func__,
+ link->link_index,
+ link->link_id.enum_id - ENUM_ID_1,
+ link->hpd_status,
+ cmd.query_hpd.data.result);
+ link->hpd_status = cmd.query_hpd.data.result;
+ } else {
+ DC_LOG_ERROR("%s: for link(%d) dpia(%d) failed with status(%d), current_hpd_status(%d) new_hpd_status(0)\n",
+ __func__,
+ link->link_index,
+ link->link_id.enum_id - ENUM_ID_1,
+ cmd.query_hpd.data.status,
+ link->hpd_status);
+ link->hpd_status = false;
+ }
+
+ return link->hpd_status;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 9eadc2c7f..ba69874be 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -265,7 +265,7 @@ void dp_handle_link_loss(struct dc_link *link)
for (i = count - 1; i >= 0; i--) {
// Always use max settings here for DP 1.4a LL Compliance CTS
- if (link->is_automated) {
+ if (link->skip_fallback_on_link_loss) {
pipes[i]->link_config.dp_link_settings.lane_count =
link->verified_link_cap.lane_count;
pipes[i]->link_config.dp_link_settings.link_rate =
@@ -404,7 +404,9 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
// Workaround for DP 1.4a LL Compliance CTS as USB4 has to share encoders unlike DP and USBC
- link->is_automated = true;
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->skip_fallback_on_link_loss = true;
+
device_service_clear.bits.AUTOMATED_TEST = 1;
core_link_write_dpcd(
link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index 4f4e899e5..5d36bab00 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -619,7 +619,7 @@ static enum link_training_result dpia_training_eq_non_transparent(
uint32_t retries_eq = 0;
enum dc_status status;
enum dc_dp_training_pattern tr_pattern;
- uint32_t wait_time_microsec;
+ uint32_t wait_time_microsec = 0;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_align_status_updated dpcd_lane_status_updated = {0};
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
@@ -811,7 +811,7 @@ static enum link_training_result dpia_training_eq_transparent(
/* Take into consideration corner case for DP 1.4a LL Compliance CTS as USB4
* has to share encoders unlike DP and USBC
*/
- if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->is_automated && retries_eq)) {
+ if (dp_is_interlane_aligned(dpcd_lane_status_updated) || (link->skip_fallback_on_link_loss && retries_eq)) {
result = LINK_TRAINING_SUCCESS;
break;
}
@@ -1037,7 +1037,7 @@ enum link_training_result dpia_perform_link_training(
*/
if (result == LINK_TRAINING_SUCCESS) {
fsleep(5000);
- if (!link->is_automated)
+ if (!link->skip_fallback_on_link_loss)
result = dp_check_link_loss_status(link, &lt_settings);
} else if (result == LINK_TRAINING_ABORT)
dpia_training_abort(link, &lt_settings, repeater_id);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
index 68096d12f..7087cdc9e 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_fixed_vs_pe_retimer.c
@@ -205,6 +205,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
+ const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87};
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
union down_spread_ctrl downspread = {0};
@@ -293,6 +294,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence_legacy(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_dpmf[0],
+ sizeof(vendor_lttpr_write_data_dpmf));
+
if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
@@ -552,6 +557,7 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
const uint8_t vendor_lttpr_write_data_4lane_3[4] = {0x1, 0x6D, 0xF2, 0x18};
const uint8_t vendor_lttpr_write_data_4lane_4[4] = {0x1, 0x6C, 0xF2, 0x03};
const uint8_t vendor_lttpr_write_data_4lane_5[4] = {0x1, 0x03, 0xF3, 0x06};
+ const uint8_t vendor_lttpr_write_data_dpmf[4] = {0x1, 0x6, 0x70, 0x87};
enum link_training_result status = LINK_TRAINING_SUCCESS;
uint8_t lane = 0;
union down_spread_ctrl downspread = {0};
@@ -639,6 +645,10 @@ enum link_training_result dp_perform_fixed_vs_pe_training_sequence(
DP_DOWNSPREAD_CTRL,
lt_settings->link_settings.link_spread);
+ link_configure_fixed_vs_pe_retimer(link->ddc,
+ &vendor_lttpr_write_data_dpmf[0],
+ sizeof(vendor_lttpr_write_data_dpmf));
+
if (lt_settings->link_settings.lane_count == LANE_COUNT_FOUR) {
link_configure_fixed_vs_pe_retimer(link->ddc,
&vendor_lttpr_write_data_4lane_1[0], sizeof(vendor_lttpr_write_data_4lane_1));
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 7c4a93d3c..d01b77fb9 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -529,6 +529,9 @@ bool edp_set_backlight_level(const struct dc_link *link,
if (dc_is_embedded_signal(link->connector_signal)) {
struct pipe_ctx *pipe_ctx = get_pipe_from_link(link);
+ if (link->panel_cntl)
+ link->panel_cntl->stored_backlight_registers.USER_LEVEL = backlight_pwm_u16_16;
+
if (pipe_ctx) {
/* Disable brightness ramping when the display is blanked
* as it can hang the DMCU
@@ -1001,7 +1004,37 @@ bool edp_setup_replay(struct dc_link *link, const struct dc_stream_state *stream
return true;
}
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal)
+/*
+ * This is a general interface for Replay to pass a 32-bit variable to DMUB.
+ * replay_FW_Message_type: Indicates which instruction or variable to pass to DMUB
+ * cmd_data: Value of the config.
+ */
+bool edp_send_replay_cmd(struct dc_link *link,
+ enum replay_FW_Message_type msg,
+ union dmub_replay_cmd_set *cmd_data)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (!replay)
+ return false;
+
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ if (dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ cmd_data->panel_inst = panel_inst;
+ else {
+ DC_LOG_DC("%s(): get edp panel inst fail ", __func__);
+ return false;
+ }
+
+ replay->funcs->replay_send_cmd(replay, msg, cmd_data);
+
+ return true;
+}
+
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal)
{
struct dc *dc = link->ctx->dc;
struct dmub_replay *replay = dc->res_pool->replay;
@@ -1039,6 +1072,33 @@ bool edp_replay_residency(const struct dc_link *link,
return true;
}
+bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
+ const unsigned int *power_opts, uint32_t coasting_vtotal)
+{
+ struct dc *dc = link->ctx->dc;
+ struct dmub_replay *replay = dc->res_pool->replay;
+ unsigned int panel_inst;
+
+ if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
+ return false;
+
+ /* Only when both the power option and the coasting vtotal have changed can this function return true */
+ if (power_opts && link->replay_settings.replay_power_opt_active != *power_opts &&
+ coasting_vtotal && link->replay_settings.coasting_vtotal != coasting_vtotal) {
+ if (link->replay_settings.replay_feature_enabled &&
+ replay->funcs->replay_set_power_opt_and_coasting_vtotal) {
+ replay->funcs->replay_set_power_opt_and_coasting_vtotal(replay,
+ *power_opts, panel_inst, coasting_vtotal);
+ link->replay_settings.replay_power_opt_active = *power_opts;
+ link->replay_settings.coasting_vtotal = coasting_vtotal;
+ } else
+ return false;
+ } else
+ return false;
+
+ return true;
+}
+
static struct abm *get_abm_from_stream_res(const struct dc_link *link)
{
int i;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
index a034288ad..a158c6234 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h
@@ -56,10 +56,15 @@ bool edp_set_replay_allow_active(struct dc_link *dc_link, const bool *enable,
bool wait, bool force_static, const unsigned int *power_opts);
bool edp_setup_replay(struct dc_link *link,
const struct dc_stream_state *stream);
-bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal);
+bool edp_send_replay_cmd(struct dc_link *link,
+ enum replay_FW_Message_type msg,
+ union dmub_replay_cmd_set *cmd_data);
+bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal);
bool edp_replay_residency(const struct dc_link *link,
unsigned int *residency, const bool is_start, const bool is_alpm);
bool edp_get_replay_state(const struct dc_link *link, uint64_t *state);
+bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link,
+ const unsigned int *power_opts, uint32_t coasting_vtotal);
bool edp_wait_for_t12(struct dc_link *link);
bool edp_is_ilr_optimization_required(struct dc_link *link,
struct dc_crtc_timing *crtc_timing);
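A minimal sketch of exercising the new replay entry points through the link service (wired up in link_factory below); the power-opt and coasting-vtotal values are placeholders and the fallback path is illustrative only:

static void example_update_replay(struct dc_link *link)
{
	unsigned int power_opts = 1;		/* placeholder */
	uint32_t coasting_vtotal = 0x41a;	/* placeholder */

	/* one call updates both settings; it only returns true when both
	 * the power option and the coasting vtotal actually changed */
	if (!link->dc->link_srv->edp_set_replay_power_opt_and_coasting_vtotal(
			link, &power_opts, coasting_vtotal))
		link->dc->link_srv->edp_set_coasting_vtotal(link, coasting_vtotal);
}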
diff --git a/drivers/gpu/drm/amd/display/dc/optc/Makefile b/drivers/gpu/drm/amd/display/dc/optc/Makefile
new file mode 100644
index 000000000..bb213335f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/optc/Makefile
@@ -0,0 +1,108 @@
+
+# Copyright 2022 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'optc' sub-component of DAL.
+#
+
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN
+###############################################################################
+
+OPTC_DCN10 = dcn10_optc.o
+
+AMD_DAL_OPTC_DCN10 = $(addprefix $(AMDDALPATH)/dc/optc/dcn10/,$(OPTC_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN10)
+
+###############################################################################
+
+OPTC_DCN20 = dcn20_optc.o
+
+AMD_DAL_OPTC_DCN20 = $(addprefix $(AMDDALPATH)/dc/optc/dcn20/,$(OPTC_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN20)
+
+###############################################################################
+
+OPTC_DCN201 = dcn201_optc.o
+
+AMD_DAL_OPTC_DCN201 = $(addprefix $(AMDDALPATH)/dc/optc/dcn201/,$(OPTC_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN201)
+
+###############################################################################
+
+###############################################################################
+
+###############################################################################
+
+OPTC_DCN30 = dcn30_optc.o
+
+AMD_DAL_OPTC_DCN30 = $(addprefix $(AMDDALPATH)/dc/optc/dcn30/,$(OPTC_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN30)
+
+###############################################################################
+
+OPTC_DCN301 = dcn301_optc.o
+
+AMD_DAL_OPTC_DCN301 = $(addprefix $(AMDDALPATH)/dc/optc/dcn301/,$(OPTC_DCN301))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN301)
+
+###############################################################################
+
+OPTC_DCN31 = dcn31_optc.o
+
+AMD_DAL_OPTC_DCN31 = $(addprefix $(AMDDALPATH)/dc/optc/dcn31/,$(OPTC_DCN31))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN31)
+
+###############################################################################
+
+OPTC_DCN314 = dcn314_optc.o
+
+AMD_DAL_OPTC_DCN314 = $(addprefix $(AMDDALPATH)/dc/optc/dcn314/,$(OPTC_DCN314))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN314)
+
+###############################################################################
+
+OPTC_DCN32 = dcn32_optc.o
+
+AMD_DAL_OPTC_DCN32 = $(addprefix $(AMDDALPATH)/dc/optc/dcn32/,$(OPTC_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN32)
+
+###############################################################################
+
+OPTC_DCN35 = dcn35_optc.o
+
+AMD_DAL_OPTC_DCN35 = $(addprefix $(AMDDALPATH)/dc/optc/dcn35/,$(OPTC_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_OPTC_DCN35)
+
+###############################################################################
+
+###############################################################################
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
index 0e8f4f36c..0e8f4f36c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index ab81594a7..6c2e84d39 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -557,7 +557,8 @@ struct dcn_optc_registers {
type OTG_CRC_DATA_STREAM_SPLIT_MODE;\
type OTG_CRC_DATA_FORMAT;\
type OTG_V_TOTAL_LAST_USED_BY_DRR;\
- type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;
+ type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\
+ type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING;
#define TG_REG_FIELD_LIST_DCN3_2(type) \
type OTG_H_TIMING_DIV_MODE_MANUAL;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
index 58bdbd859..58bdbd859 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h
index f7968b9ca..c2e03ced3 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn20/dcn20_optc.h
@@ -26,7 +26,7 @@
#ifndef __DC_OPTC_DCN20_H__
#define __DC_OPTC_DCN20_H__
-#include "../dcn10/dcn10_optc.h"
+#include "dcn10/dcn10_optc.h"
#define TG_COMMON_REG_LIST_DCN2_0(inst) \
TG_COMMON_REG_LIST_DCN(inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
index 70fcbec03..70fcbec03 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h
index e9545b735..e9545b735 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn201/dcn201_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
index b97bdb868..b97bdb868 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h
index d3a056c12..d3a056c12 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn30/dcn30_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
index b3cfcb887..b3cfcb887 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h
index b49585682..b49585682 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn301/dcn301_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index 63a677c8e..63a677c8e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
index 30b81a448..30b81a448 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
index 0086cafb0..0086cafb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
index 99c098e76..99c098e76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn314/dcn314_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
index 823493543..52eab8fcc 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c
@@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi
}
}
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(tg);
+
+ REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000);
+}
+
void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode)
{
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -260,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc)
OTG_V_TOTAL_MAX_SEL, 1,
OTG_FORCE_LOCK_ON_EVENT, 0,
OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */
-
- // Setup manual flow control for EOF via TRIG_A
- optc->funcs->setup_manual_trigger(optc);
}
}
@@ -345,6 +349,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = {
.set_odm_bypass = optc32_set_odm_bypass,
.set_odm_combine = optc32_set_odm_combine,
.get_odm_combine_segments = optc32_get_odm_combine_segments,
+ .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
.set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode,
.get_optc_source = optc2_get_optc_source,
.set_out_mux = optc3_set_out_mux,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
index 8ce3b178c..0c2c14695 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h
@@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man
void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments);
void optc32_set_odm_bypass(struct timing_generator *optc,
const struct dc_crtc_timing *dc_crtc_timing);
+void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg);
#endif /* __DC_OPTC_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
index 5b1547508..5b1547508 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
index 1f422e4c4..1f422e4c4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn35/dcn35_optc.h
diff --git a/drivers/gpu/drm/amd/display/dc/resource/Makefile b/drivers/gpu/drm/amd/display/dc/resource/Makefile
new file mode 100644
index 000000000..0a75ed896
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/Makefile
@@ -0,0 +1,199 @@
+
+# Copyright 2022 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+# Makefile for the 'resource' sub-component of DAL.
+#
+
+
+###############################################################################
+# DCE
+###############################################################################
+
+RESOURCE_DCE100 = dce100_resource.o
+
+AMD_DAL_RESOURCE_DCE100 = $(addprefix $(AMDDALPATH)/dc/resource/dce100/,$(RESOURCE_DCE100))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE100)
+
+###############################################################################
+
+RESOURCE_DCE110 = dce110_resource.o
+
+AMD_DAL_RESOURCE_DCE110 = $(addprefix $(AMDDALPATH)/dc/resource/dce110/,$(RESOURCE_DCE110))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE110)
+
+###############################################################################
+
+RESOURCE_DCE112 = dce112_resource.o
+
+AMD_DAL_RESOURCE_DCE112 = $(addprefix $(AMDDALPATH)/dc/resource/dce112/,$(RESOURCE_DCE112))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE112)
+
+###############################################################################
+
+RESOURCE_DCE120 = dce120_resource.o
+
+AMD_DAL_RESOURCE_DCE120 = $(addprefix $(AMDDALPATH)/dc/resource/dce120/,$(RESOURCE_DCE120))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE120)
+
+###############################################################################
+
+RESOURCE_DCE80 = dce80_resource.o
+
+AMD_DAL_RESOURCE_DCE80 = $(addprefix $(AMDDALPATH)/dc/resource/dce80/,$(RESOURCE_DCE80))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCE80)
+
+ifdef CONFIG_DRM_AMD_DC_FP
+###############################################################################
+# DCN
+###############################################################################
+
+RESOURCE_DCN10 = dcn10_resource.o
+
+AMD_DAL_RESOURCE_DCN10 = $(addprefix $(AMDDALPATH)/dc/resource/dcn10/,$(RESOURCE_DCN10))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN10)
+
+###############################################################################
+
+RESOURCE_DCN20 = dcn20_resource.o
+
+AMD_DAL_RESOURCE_DCN20 = $(addprefix $(AMDDALPATH)/dc/resource/dcn20/,$(RESOURCE_DCN20))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN20)
+
+###############################################################################
+
+RESOURCE_DCN201 = dcn201_resource.o
+
+AMD_DAL_RESOURCE_DCN201 = $(addprefix $(AMDDALPATH)/dc/resource/dcn201/,$(RESOURCE_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN201)
+
+###############################################################################
+
+RESOURCE_DCN21 = dcn21_resource.o
+
+AMD_DAL_RESOURCE_DCN21 = $(addprefix $(AMDDALPATH)/dc/resource/dcn21/,$(RESOURCE_DCN21))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN21)
+
+###############################################################################
+
+###############################################################################
+
+###############################################################################
+
+RESOURCE_DCN30 = dcn30_resource.o
+
+AMD_DAL_RESOURCE_DCN30 = $(addprefix $(AMDDALPATH)/dc/resource/dcn30/,$(RESOURCE_DCN30))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN30)
+
+###############################################################################
+
+RESOURCE_DCN301 = dcn301_resource.o
+
+AMD_DAL_RESOURCE_DCN301 = $(addprefix $(AMDDALPATH)/dc/resource/dcn301/,$(RESOURCE_DCN301))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN301)
+
+###############################################################################
+
+RESOURCE_DCN302 = dcn302_resource.o
+
+AMD_DAL_RESOURCE_DCN302 = $(addprefix $(AMDDALPATH)/dc/resource/dcn302/,$(RESOURCE_DCN302))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN302)
+
+###############################################################################
+
+RESOURCE_DCN303 = dcn303_resource.o
+
+AMD_DAL_RESOURCE_DCN303 = $(addprefix $(AMDDALPATH)/dc/resource/dcn303/,$(RESOURCE_DCN303))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN303)
+
+###############################################################################
+
+RESOURCE_DCN31 = dcn31_resource.o
+
+AMD_DAL_RESOURCE_DCN31 = $(addprefix $(AMDDALPATH)/dc/resource/dcn31/,$(RESOURCE_DCN31))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN31)
+
+###############################################################################
+
+RESOURCE_DCN314 = dcn314_resource.o
+
+AMD_DAL_RESOURCE_DCN314 = $(addprefix $(AMDDALPATH)/dc/resource/dcn314/,$(RESOURCE_DCN314))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN314)
+
+###############################################################################
+
+RESOURCE_DCN315 = dcn315_resource.o
+
+AMD_DAL_RESOURCE_DCN315 = $(addprefix $(AMDDALPATH)/dc/resource/dcn315/,$(RESOURCE_DCN315))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN315)
+
+###############################################################################
+
+RESOURCE_DCN316 = dcn316_resource.o
+
+AMD_DAL_RESOURCE_DCN316 = $(addprefix $(AMDDALPATH)/dc/resource/dcn316/,$(RESOURCE_DCN316))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN316)
+
+###############################################################################
+
+RESOURCE_DCN32 = dcn32_resource.o
+
+AMD_DAL_RESOURCE_DCN32 = $(addprefix $(AMDDALPATH)/dc/resource/dcn32/,$(RESOURCE_DCN32))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN32)
+
+###############################################################################
+
+RESOURCE_DCN321 = dcn321_resource.o
+
+AMD_DAL_RESOURCE_DCN321 = $(addprefix $(AMDDALPATH)/dc/resource/dcn321/,$(RESOURCE_DCN321))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN321)
+
+###############################################################################
+
+RESOURCE_DCN35 = dcn35_resource.o
+
+AMD_DAL_RESOURCE_DCN35 = $(addprefix $(AMDDALPATH)/dc/resource/dcn35/,$(RESOURCE_DCN35))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_RESOURCE_DCN35)
+
+###############################################################################
+
+###############################################################################
+
+endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
index 53a5f4cb6..53a5f4cb6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
index fecab7c56..fecab7c56 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
index fe518fd27..fe518fd27 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h
index aa4531e08..aa4531e08 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
index d1edac46c..d1edac46c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
index 1f57ebc6f..1f57ebc6f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
index 962de79be..20662edd0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
@@ -36,7 +36,7 @@
#include "dce110/dce110_resource.h"
#include "virtual/virtual_stream_encoder.h"
-#include "dce120_timing_generator.h"
+#include "dce120/dce120_timing_generator.h"
#include "irq/dce120/irq_service_dce120.h"
#include "dce/dce_opp.h"
#include "dce/dce_clock_source.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h
index 3d1f3cf01..3d1f3cf01 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt b/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt
new file mode 100644
index 000000000..19dd73bc9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/CMakeLists.txt
@@ -0,0 +1,4 @@
+dal3_subdirectory_sources(
+ dce80_resource.c
+ dce80_resource.h
+ ) \ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
index 35a2cce0c..35a2cce0c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h
index eff31ab83..eff31ab83 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
index b94c5c97e..d08d10969 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c
@@ -26,29 +26,32 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn10_init.h"
+#include "dcn10/dcn10_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
-#include "dcn10_resource.h"
-#include "dcn10_ipp.h"
-#include "dcn10_mpc.h"
+#include "dcn10/dcn10_resource.h"
+#include "dcn10/dcn10_ipp.h"
+#include "dcn10/dcn10_mpc.h"
+
+#include "dcn10/dcn10_dwb.h"
+
#include "irq/dcn10/irq_service_dcn10.h"
-#include "dcn10_dpp.h"
-#include "dcn10_optc.h"
+#include "dcn10/dcn10_dpp.h"
+#include "dcn10/dcn10_optc.h"
#include "dcn10/dcn10_hwseq.h"
#include "dce110/dce110_hwseq.h"
-#include "dcn10_opp.h"
-#include "dcn10_link_encoder.h"
-#include "dcn10_stream_encoder.h"
+#include "dcn10/dcn10_opp.h"
+#include "dcn10/dcn10_link_encoder.h"
+#include "dcn10/dcn10_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
-#include "dcn10_hubp.h"
-#include "dcn10_hubbub.h"
+#include "dcn10/dcn10_hubp.h"
+#include "dcn10/dcn10_hubbub.h"
#include "dce/dce_panel_cntl.h"
#include "soc15_hw_ip.h"
@@ -1247,7 +1250,10 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
/* Store first available for MST second display
* in daisy chain use case
*/
- j = i;
+
+ if (pool->stream_enc[i]->id != ENGINE_ID_VIRTUAL)
+ j = i;
+
if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
index bf8e33cd8..bf8e33cd8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index e73e59754..f9c5bc624 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -29,7 +29,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn20_init.h"
+#include "dcn20/dcn20_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -39,29 +39,29 @@
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
-#include "dcn20_hubbub.h"
-#include "dcn20_mpc.h"
-#include "dcn20_hubp.h"
+#include "dcn20/dcn20_hubbub.h"
+#include "dcn20/dcn20_mpc.h"
+#include "dcn20/dcn20_hubp.h"
#include "irq/dcn20/irq_service_dcn20.h"
-#include "dcn20_dpp.h"
-#include "dcn20_optc.h"
+#include "dcn20/dcn20_dpp.h"
+#include "dcn20/dcn20_optc.h"
#include "dcn20/dcn20_hwseq.h"
#include "dce110/dce110_hwseq.h"
#include "dcn10/dcn10_resource.h"
-#include "dcn20_opp.h"
+#include "dcn20/dcn20_opp.h"
-#include "dcn20_dsc.h"
+#include "dcn20/dcn20_dsc.h"
-#include "dcn20_link_encoder.h"
-#include "dcn20_stream_encoder.h"
+#include "dcn20/dcn20_link_encoder.h"
+#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "virtual/virtual_stream_encoder.h"
#include "dce110/dce110_resource.h"
#include "dml/display_mode_vba.h"
-#include "dcn20_dccg.h"
-#include "dcn20_vmid.h"
+#include "dcn20/dcn20_dccg.h"
+#include "dcn20/dcn20_vmid.h"
#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
index 4cee3fa11..4cee3fa11 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
index bca22d867..914b234d7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c
@@ -26,7 +26,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn201_init.h"
+#include "dcn201/dcn201_init.h"
#include "dml/dcn20/dcn20_fpu.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -36,16 +36,16 @@
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
-#include "dcn201_mpc.h"
-#include "dcn201_hubp.h"
+#include "dcn201/dcn201_mpc.h"
+#include "dcn201/dcn201_hubp.h"
#include "irq/dcn201/irq_service_dcn201.h"
#include "dcn201/dcn201_dpp.h"
#include "dcn201/dcn201_hubbub.h"
-#include "dcn201_dccg.h"
-#include "dcn201_optc.h"
+#include "dcn201/dcn201_dccg.h"
+#include "dcn201/dcn201_optc.h"
#include "dcn201/dcn201_hwseq.h"
#include "dce110/dce110_hwseq.h"
-#include "dcn201_opp.h"
+#include "dcn201/dcn201_opp.h"
#include "dcn201/dcn201_link_encoder.h"
#include "dcn20/dcn20_stream_encoder.h"
#include "dce/dce_clock_source.h"
@@ -55,7 +55,7 @@
#include "dce110/dce110_resource.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
-#include "dcn201_hubbub.h"
+#include "dcn201/dcn201_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "cyan_skillfish_ip_offset.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h
index e0467d17d..e0467d17d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
index 42277b280..65d337731 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c
@@ -29,7 +29,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn21_init.h"
+#include "dcn21/dcn21_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -44,7 +44,7 @@
#include "dcn20/dcn20_hubbub.h"
#include "dcn20/dcn20_mpc.h"
#include "dcn20/dcn20_hubp.h"
-#include "dcn21_hubp.h"
+#include "dcn21/dcn21_hubp.h"
#include "irq/dcn21/irq_service_dcn21.h"
#include "dcn20/dcn20_dpp.h"
#include "dcn20/dcn20_optc.h"
@@ -61,7 +61,7 @@
#include "dml/display_mode_vba.h"
#include "dcn20/dcn20_dccg.h"
#include "dcn21/dcn21_dccg.h"
-#include "dcn21_hubbub.h"
+#include "dcn21/dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
#include "dce/dce_panel_cntl.h"
@@ -713,9 +713,8 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
pool->base.hubps[i] = NULL;
}
- if (pool->base.irqs != NULL) {
+ if (pool->base.irqs != NULL)
dal_irq_service_destroy(&pool->base.irqs);
- }
}
for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
index f7ecc002c..f7ecc002c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
index 7b259cb5f..37a64186f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
@@ -27,7 +27,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn30_init.h"
+#include "dcn30/dcn30_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -1682,6 +1682,7 @@ noinline bool dcn30_internal_validate_bw(
* We don't actually support prefetch mode 2, so require that we
* at least support prefetch mode 1.
*/
+ context->bw_ctx.dml.validate_max_state = fast_validate;
context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank =
dm_allow_self_refresh;
@@ -1691,6 +1692,7 @@ noinline bool dcn30_internal_validate_bw(
memset(merge, 0, sizeof(merge));
vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge);
}
+ context->bw_ctx.dml.validate_max_state = false;
}
dml_log_mode_support_params(&context->bw_ctx.dml);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
index 8e6b8b736..8e6b8b736 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
index ce04caf35..7538b548c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c
@@ -27,7 +27,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn301_init.h"
+#include "dcn301/dcn301_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -61,7 +61,7 @@
#include "dcn10/dcn10_resource.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
#include "dcn301/dcn301_dio_link_encoder.h"
-#include "dcn301_panel_cntl.h"
+#include "dcn301/dcn301_panel_cntl.h"
#include "vangogh_ip_offset.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h
index ae8672680..ae8672680 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
index 63ac984a0..5791b5cc2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
@@ -23,9 +23,9 @@
*
*/
-#include "dcn302_init.h"
+#include "dcn302/dcn302_init.h"
#include "dcn302_resource.h"
-#include "dcn302_dccg.h"
+#include "dcn302/dcn302_dccg.h"
#include "irq/dcn302/irq_service_dcn302.h"
#include "dcn30/dcn30_dio_link_encoder.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h
index 9f24e73b9..9f24e73b9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
index 49cb7fde4..25cd6236b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
@@ -23,9 +23,9 @@
* Authors: AMD
*/
-#include "dcn303_init.h"
+#include "dcn303/dcn303_init.h"
#include "dcn303_resource.h"
-#include "dcn303_dccg.h"
+#include "dcn303/dcn303_dccg.h"
#include "irq/dcn303/irq_service_dcn303.h"
#include "dcn30/dcn30_dio_link_encoder.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h
index 37cf15258..37cf15258 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
index 79416cfb2..31035fc3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
@@ -70,7 +70,7 @@
#include "dml/dcn31/dcn31_fpu.h"
#include "dcn31/dcn31_dccg.h"
#include "dcn10/dcn10_resource.h"
-#include "dcn31_panel_cntl.h"
+#include "dcn31/dcn31_panel_cntl.h"
#include "dcn30/dcn30_dwb.h"
#include "dcn30/dcn30_mmhubbub.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
index 901436591..901436591 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
index c97391edb..c97391edb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
index 49ffe7101..49ffe7101 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
index cb8024eee..515ba435f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c
@@ -1631,8 +1631,10 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context)
int i;
struct resource_context *res_ctx = &context->res_ctx;
- /*Don't apply for single stream*/
- if (context->stream_count < 2)
+ /* Only apply for dual stream scenarios with eDP */
+ if (context->stream_count != 2)
+ return false;
+ if (context->streams[0]->signal != SIGNAL_TYPE_EDP && context->streams[1]->signal != SIGNAL_TYPE_EDP)
return false;
for (i = 0; i < dc->res_pool->pipe_count; i++) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h
index 22849eaa6..22849eaa6 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn315/dcn315_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
index b9753d460..b9753d460 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c
diff --git a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h
index aba6d6341..aba6d6341 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn316/dcn316_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
index f663de1cd..9042378fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
@@ -27,7 +27,7 @@
#include "dm_services.h"
#include "dc.h"
-#include "dcn32_init.h"
+#include "dcn32/dcn32_init.h"
#include "resource.h"
#include "include/irq_service_interface.h"
@@ -41,7 +41,7 @@
#include "dcn31/dcn31_hubbub.h"
#include "dcn32/dcn32_hubbub.h"
#include "dcn32/dcn32_mpc.h"
-#include "dcn32_hubp.h"
+#include "dcn32/dcn32_hubp.h"
#include "irq/dcn32/irq_service_dcn32.h"
#include "dcn32/dcn32_dpp.h"
#include "dcn32/dcn32_optc.h"
@@ -89,6 +89,8 @@
#include "dcn20/dcn20_vmid.h"
#include "dml/dcn32/dcn32_fpu.h"
+#include "dc_state_priv.h"
+
#include "dml2/dml2_wrapper.h"
#define DC_LOGGER_INIT(logger)
@@ -1644,7 +1646,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
if (curr_pipe->top_pipe && curr_pipe->top_pipe->plane_state == curr_pipe->plane_state)
phantom_plane = prev_phantom_plane;
else
- phantom_plane = dc_create_plane_state(dc);
+ phantom_plane = dc_state_create_phantom_plane(dc, context, curr_pipe->plane_state);
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
@@ -1665,9 +1667,7 @@ static void dcn32_enable_phantom_plane(struct dc *dc,
phantom_plane->clip_rect.y = 0;
phantom_plane->clip_rect.height = phantom_stream->src.height;
- phantom_plane->is_phantom = true;
-
- dc_add_plane_to_context(dc, phantom_stream, phantom_plane, context);
+ dc_state_add_phantom_plane(dc, phantom_stream, phantom_plane, context);
curr_pipe = curr_pipe->bottom_pipe;
prev_phantom_plane = phantom_plane;
@@ -1683,13 +1683,7 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
struct dc_stream_state *phantom_stream = NULL;
struct pipe_ctx *ref_pipe = &context->res_ctx.pipe_ctx[dc_pipe_idx];
- phantom_stream = dc_create_stream_for_sink(ref_pipe->stream->sink);
- phantom_stream->signal = SIGNAL_TYPE_VIRTUAL;
- phantom_stream->dpms_off = true;
- phantom_stream->mall_stream_config.type = SUBVP_PHANTOM;
- phantom_stream->mall_stream_config.paired_stream = ref_pipe->stream;
- ref_pipe->stream->mall_stream_config.type = SUBVP_MAIN;
- ref_pipe->stream->mall_stream_config.paired_stream = phantom_stream;
+ phantom_stream = dc_state_create_phantom_stream(dc, context, ref_pipe->stream);
/* stream has limited viewport and small timing */
memcpy(&phantom_stream->timing, &ref_pipe->stream->timing, sizeof(phantom_stream->timing));
@@ -1699,81 +1693,10 @@ static struct dc_stream_state *dcn32_enable_phantom_stream(struct dc *dc,
dcn32_set_phantom_stream_timing(dc, context, ref_pipe, phantom_stream, pipes, pipe_cnt, dc_pipe_idx);
DC_FP_END();
- dc_add_stream_to_ctx(dc, context, phantom_stream);
+ dc_state_add_phantom_stream(dc, context, phantom_stream, ref_pipe->stream);
return phantom_stream;
}
-void dcn32_retain_phantom_pipes(struct dc *dc, struct dc_state *context)
-{
- int i;
- struct dc_plane_state *phantom_plane = NULL;
- struct dc_stream_state *phantom_stream = NULL;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
-
- if (resource_is_pipe_type(pipe, OTG_MASTER) &&
- resource_is_pipe_type(pipe, DPP_PIPE) &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- phantom_plane = pipe->plane_state;
- phantom_stream = pipe->stream;
-
- dc_plane_state_retain(phantom_plane);
- dc_stream_retain(phantom_stream);
- }
- }
-}
-
-// return true if removed piped from ctx, false otherwise
-bool dcn32_remove_phantom_pipes(struct dc *dc, struct dc_state *context, bool fast_update)
-{
- int i;
- bool removed_pipe = false;
- struct dc_plane_state *phantom_plane = NULL;
- struct dc_stream_state *phantom_stream = NULL;
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- // build scaling params for phantom pipes
- if (pipe->plane_state && pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
- phantom_plane = pipe->plane_state;
- phantom_stream = pipe->stream;
-
- dc_rem_all_planes_for_stream(dc, pipe->stream, context);
- dc_remove_stream_from_ctx(dc, context, pipe->stream);
-
- /* Ref count is incremented on allocation and also when added to the context.
- * Therefore we must call release for the the phantom plane and stream once
- * they are removed from the ctx to finally decrement the refcount to 0 to free.
- */
- dc_plane_state_release(phantom_plane);
- dc_stream_release(phantom_stream);
-
- removed_pipe = true;
- }
-
- /* For non-full updates, a shallow copy of the current state
- * is created. In this case we don't want to erase the current
- * state (there can be 2 HIRQL threads, one in flip, and one in
- * checkMPO) that can cause a race condition.
- *
- * This is just a workaround, needs a proper fix.
- */
- if (!fast_update) {
- // Clear all phantom stream info
- if (pipe->stream) {
- pipe->stream->mall_stream_config.type = SUBVP_NONE;
- pipe->stream->mall_stream_config.paired_stream = NULL;
- }
-
- if (pipe->plane_state) {
- pipe->plane_state->is_phantom = false;
- }
- }
- }
- return removed_pipe;
-}
-
/* TODO: Input to this function should indicate which pipe indexes (or streams)
* require a phantom pipe / stream
*/
@@ -1798,7 +1721,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context,
// We determine which phantom pipes were added by comparing with
// the phantom stream.
if (pipe->plane_state && pipe->stream && pipe->stream == phantom_stream &&
- pipe->stream->mall_stream_config.type == SUBVP_PHANTOM) {
+ dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM) {
pipe->stream->use_dynamic_meta = false;
pipe->plane_state->flip_immediate = false;
if (!resource_build_scaling_params(pipe)) {
@@ -1817,7 +1740,6 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
int vlevel = 0;
int pipe_cnt = 0;
display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
- struct mall_temp_config mall_temp_config;
/* To handle Freesync properly, setting FreeSync DML parameters
* to its default state for the first stage of validation
@@ -1827,29 +1749,12 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val
DC_LOGGER_INIT(dc->ctx->logger);
- /* For fast validation, there are situations where a shallow copy of
- * of the dc->current_state is created for the validation. In this case
- * we want to save and restore the mall config because we always
- * teardown subvp at the beginning of validation (and don't attempt
- * to add it back if it's fast validation). If we don't restore the
- * subvp config in cases of fast validation + shallow copy of the
- * dc->current_state, the dc->current_state will have a partially
- * removed subvp state when we did not intend to remove it.
- */
- if (fast_validate) {
- memset(&mall_temp_config, 0, sizeof(mall_temp_config));
- dcn32_save_mall_state(dc, context, &mall_temp_config);
- }
-
BW_VAL_TRACE_COUNT();
DC_FP_START();
out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
DC_FP_END();
- if (fast_validate)
- dcn32_restore_mall_state(dc, context, &mall_temp_config);
-
if (pipe_cnt == 0)
goto validate_out;
@@ -1948,7 +1853,7 @@ int dcn32_populate_dml_pipes_from_context(
* This is just a workaround -- needs a proper fix.
*/
if (!fast_validate) {
- switch (pipe->stream->mall_stream_config.type) {
+ switch (dc_state_get_pipe_subvp_type(context, pipe)) {
case SUBVP_MAIN:
pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport;
subvp_in_use = true;
@@ -2054,10 +1959,6 @@ static struct resource_funcs dcn32_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
- .remove_phantom_pipes = dcn32_remove_phantom_pipes,
- .retain_phantom_pipes = dcn32_retain_phantom_pipes,
- .save_mall_state = dcn32_save_mall_state,
- .restore_mall_state = dcn32_restore_mall_state,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
};
@@ -2471,16 +2372,19 @@ static bool dcn32_resource_construct(
dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
- dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
- dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
- dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
- dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
- dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
- dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
+ dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
index 351c8a284..2258c5c72 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h
@@ -39,6 +39,7 @@
#define DCN3_2_MBLK_HEIGHT_8BPE 64
#define DCN3_2_DCFCLK_DS_INIT_KHZ 10000 // Choose 10Mhz for init DCFCLK DS freq
#define SUBVP_HIGH_REFRESH_LIST_LEN 4
+#define SUBVP_ACTIVE_MARGIN_LIST_LEN 2
#define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800
#define DCN3_2_VMIN_DISPCLK_HZ 717000000
#define MIN_SUBVP_DCFCLK_KHZ 400000
@@ -58,6 +59,15 @@ struct subvp_high_refresh_list {
} res[SUBVP_HIGH_REFRESH_LIST_LEN];
};
+struct subvp_active_margin_list {
+ int min_refresh;
+ int max_refresh;
+ struct {
+ int width;
+ int height;
+ } res[SUBVP_ACTIVE_MARGIN_LIST_LEN];
+};
+
struct dcn32_resource_pool {
struct resource_pool base;
};
@@ -82,12 +92,6 @@ bool dcn32_release_post_bldn_3dlut(
struct dc_3dlut **lut,
struct dc_transfer_func **shaper);
-bool dcn32_remove_phantom_pipes(struct dc *dc,
- struct dc_state *context, bool fast_update);
-
-void dcn32_retain_phantom_pipes(struct dc *dc,
- struct dc_state *context);
-
void dcn32_add_phantom_pipes(struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -160,15 +164,7 @@ void dcn32_determine_det_override(struct dc *dc,
void dcn32_set_det_allocations(struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes);
-void dcn32_save_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config);
-
-void dcn32_restore_mall_state(struct dc *dc,
- struct dc_state *context,
- struct mall_temp_config *temp_config);
-
-struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, const struct dc_state *context);
+struct dc_stream_state *dcn32_can_support_mclk_switch_using_fw_based_vblank_stretch(struct dc *dc, struct dc_state *context);
bool dcn32_allow_subvp_with_active_margin(struct pipe_ctx *pipe);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
index 3b7505b5f..f4dd6443a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
@@ -63,7 +63,7 @@
#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_dio_link_encoder.h"
#include "dcn32/dcn32_dio_link_encoder.h"
-#include "dcn321_dio_link_encoder.h"
+#include "dcn321/dcn321_dio_link_encoder.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
@@ -92,6 +92,8 @@
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
+#include "dc_state_priv.h"
+
#define DC_LOGGER_INIT(logger)
enum dcn321_clk_src_array_id {
@@ -1607,10 +1609,6 @@ static struct resource_funcs dcn321_res_pool_funcs = {
.patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.add_phantom_pipes = dcn32_add_phantom_pipes,
- .remove_phantom_pipes = dcn32_remove_phantom_pipes,
- .retain_phantom_pipes = dcn32_retain_phantom_pipes,
- .save_mall_state = dcn32_save_mall_state,
- .restore_mall_state = dcn32_restore_mall_state,
.build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params,
};
@@ -2010,16 +2008,19 @@ static bool dcn321_resource_construct(
dc->dml2_options.callbacks.get_opp_head = &resource_get_opp_head;
dc->dml2_options.svp_pstate.callbacks.dc = dc;
- dc->dml2_options.svp_pstate.callbacks.add_plane_to_context = &dc_add_plane_to_context;
- dc->dml2_options.svp_pstate.callbacks.add_stream_to_ctx = &dc_add_stream_to_ctx;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_plane = &dc_state_add_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.add_phantom_stream = &dc_state_add_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.build_scaling_params = &resource_build_scaling_params;
- dc->dml2_options.svp_pstate.callbacks.create_plane = &dc_create_plane_state;
- dc->dml2_options.svp_pstate.callbacks.remove_plane_from_context = &dc_remove_plane_from_context;
- dc->dml2_options.svp_pstate.callbacks.remove_stream_from_ctx = &dc_remove_stream_from_ctx;
- dc->dml2_options.svp_pstate.callbacks.create_stream_for_sink = &dc_create_stream_for_sink;
- dc->dml2_options.svp_pstate.callbacks.plane_state_release = &dc_plane_state_release;
- dc->dml2_options.svp_pstate.callbacks.stream_release = &dc_stream_release;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_plane = &dc_state_create_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_plane = &dc_state_remove_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.remove_phantom_stream = &dc_state_remove_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.create_phantom_stream = &dc_state_create_phantom_stream;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_plane = &dc_state_release_phantom_plane;
+ dc->dml2_options.svp_pstate.callbacks.release_phantom_stream = &dc_state_release_phantom_stream;
dc->dml2_options.svp_pstate.callbacks.release_dsc = &dcn20_release_dsc;
+ dc->dml2_options.svp_pstate.callbacks.get_pipe_subvp_type = &dc_state_get_pipe_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_stream_subvp_type = &dc_state_get_stream_subvp_type;
+ dc->dml2_options.svp_pstate.callbacks.get_paired_subvp_stream = &dc_state_get_paired_subvp_stream;
dc->dml2_options.svp_pstate.subvp_fw_processing_delay_us = dc->caps.subvp_fw_processing_delay_us;
dc->dml2_options.svp_pstate.subvp_prefetch_end_to_mall_start_us = dc->caps.subvp_prefetch_end_to_mall_start_us;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h
index 82cbf009f..82cbf009f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.h
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
index 70ef1e7ff..78c315541 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
@@ -78,7 +78,7 @@
#include "dcn10/dcn10_resource.h"
#include "dcn31/dcn31_panel_cntl.h"
#include "dcn35/dcn35_hwseq.h"
-#include "dcn35_dio_link_encoder.h"
+#include "dcn35/dcn35_dio_link_encoder.h"
#include "dml/dcn31/dcn31_fpu.h" /*todo*/
#include "dml/dcn35/dcn35_fpu.h"
#include "dcn35/dcn35_dwb.h"
@@ -96,12 +96,15 @@
#include "reg_helper.h"
#include "dce/dmub_abm.h"
#include "dce/dmub_psr.h"
+#include "dce/dmub_replay.h"
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "dml/dcn31/display_mode_vba_31.h" /*temp*/
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
+#include "dc_state_priv.h"
+
#include "link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
@@ -626,7 +629,19 @@ static struct dce_hwseq_registers hwseq_reg;
HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_ROOT_GATE_DISABLE, mask_sh), \
- HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh)
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DTBCLK_P3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL5, DPSTREAMCLK3_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK0_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK1_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK2_GATE_DISABLE, mask_sh),\
+ HWS_SF(, DCCG_GATE_DISABLE_CNTL4, DPIASYMCLK3_GATE_DISABLE, mask_sh)
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN35_MASK_SH_LIST(__SHIFT)
@@ -686,7 +701,7 @@ static const struct dc_plane_cap plane_cap = {
// 6:1 downscaling ratio: 1000/6 = 166.666
.max_downscale_factor = {
- .argb8888 = 167,
+ .argb8888 = 250,
.nv12 = 167,
.fp16 = 167
},
@@ -705,7 +720,9 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.disable_dpp_power_gate = true,
.disable_hubp_power_gate = true,
- .disable_clock_gate = true,
+ .disable_optc_power_gate = true, /* should be the same as the two above */
+ .disable_hpo_power_gate = true, /* dmub fw forces domain25 on */
+ .disable_clock_gate = false,
.disable_dsc_power_gate = true,
.vsr_support = true,
.performance_trace = false,
@@ -724,7 +741,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.i2c = true,
.dmcu = false, // This is previously known to cause hang on S3 cycles if enabled
.dscl = true,
- .cm = false,
+ .cm = true,
.mpc = true,
.optc = true,
.vpg = true,
@@ -752,7 +769,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.enable_hpo_pg_support = false,
.enable_legacy_fast_update = true,
.enable_single_display_2to1_odm_policy = false,
- .disable_idle_power_optimizations = true,
+ .disable_idle_power_optimizations = false,
.dmcub_emulation = false,
.disable_boot_optimizations = false,
.disable_unbounded_requesting = false,
@@ -763,14 +780,17 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_z10 = false,
.ignore_pg = true,
.psp_disabled_wa = true,
- .ips2_eval_delay_us = 200,
- .ips2_entry_delay_us = 400
+ .ips2_eval_delay_us = 2000,
+ .ips2_entry_delay_us = 800,
+ .disable_dmub_reallow_idle = true,
+ .static_screen_wait_frames = 2,
};
static const struct dc_panel_config panel_config_defaults = {
.psr = {
.disable_psr = false,
.disallow_psrsu = false,
+ .disallow_replay = false,
},
.ilr = {
.optimize_edp_link_rate = true,
@@ -1529,6 +1549,9 @@ static void dcn35_resource_destruct(struct dcn35_resource_pool *pool)
if (pool->base.psr != NULL)
dmub_psr_destroy(&pool->base.psr);
+ if (pool->base.replay != NULL)
+ dmub_replay_destroy(&pool->base.replay);
+
if (pool->base.pg_cntl != NULL)
dcn_pg_cntl_destroy(&pool->base.pg_cntl);
@@ -2013,6 +2036,14 @@ static bool dcn35_resource_construct(
goto create_fail;
}
+ /* Replay */
+ pool->base.replay = dmub_replay_create(ctx);
+ if (pool->base.replay == NULL) {
+ dm_error("DC: failed to create replay obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+
/* ABM */
for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
pool->base.multiple_abms[i] = dmub_abm_create(ctx,
@@ -2100,6 +2131,7 @@ static bool dcn35_resource_construct(
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count;
dc->dml2_options.use_native_pstate_optimization = true;
dc->dml2_options.use_native_soc_bb_construction = true;
+ dc->dml2_options.minimize_dispclk_using_odm = false;
if (dc->config.EnableMinDispClkODM)
dc->dml2_options.minimize_dispclk_using_odm = true;
dc->dml2_options.enable_windowed_mpo_odm = dc->config.enable_windowed_mpo_odm;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
index 99aea102e..a51c4a9ea 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.h
@@ -166,6 +166,7 @@ struct resource_pool *dcn35_create_resource_pool(
SR(MMHUBBUB_MEM_PWR_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL), \
SR(DCCG_GATE_DISABLE_CNTL2), \
+ SR(DCCG_GATE_DISABLE_CNTL4), \
SR(DCCG_GATE_DISABLE_CNTL5), \
SR(DCFCLK_CNTL),\
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \